Subversion Repositories Kolibri OS


Rev Author Line No. Line
2327 Serge 1
/*
2
 * Copyright © 2006-2007 Intel Corporation
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21
 * DEALINGS IN THE SOFTWARE.
22
 *
23
 * Authors:
24
 *  Eric Anholt <eric@anholt.net>
25
 */
26
 
5097 serge 27
#include 
2327 Serge 28
#include 
29
//#include 
30
#include 
31
#include 
2330 Serge 32
#include 
3746 Serge 33
#include 
2342 Serge 34
#include 
3031 serge 35
#include 
2327 Serge 36
#include "intel_drv.h"
3031 serge 37
#include 
2327 Serge 38
#include "i915_drv.h"
2351 Serge 39
#include "i915_trace.h"
3031 serge 40
#include 
41
#include 
5060 serge 42
#include 
43
#include 
44
#include 
2327 Serge 45
 
5060 serge 46
static inline void ndelay(unsigned long x)
47
{
48
    udelay(DIV_ROUND_UP(x, 1000));
49
}
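/*
 * ndelay() appears to be a local stand-in for the kernel helper of the same
 * name: it rounds the nanosecond count up to whole microseconds and calls
 * udelay(), so the resulting delay is never shorter than requested.
 */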
50
 
51
/* Primary plane formats supported by all gen */
52
#define COMMON_PRIMARY_FORMATS \
53
	DRM_FORMAT_C8, \
54
	DRM_FORMAT_RGB565, \
55
	DRM_FORMAT_XRGB8888, \
56
	DRM_FORMAT_ARGB8888
57
 
58
/* Primary plane formats for gen <= 3 */
59
static const uint32_t intel_primary_formats_gen2[] = {
60
	COMMON_PRIMARY_FORMATS,
61
	DRM_FORMAT_XRGB1555,
62
	DRM_FORMAT_ARGB1555,
63
};
64
 
65
/* Primary plane formats for gen >= 4 */
66
static const uint32_t intel_primary_formats_gen4[] = {
67
	COMMON_PRIMARY_FORMATS, \
68
	DRM_FORMAT_XBGR8888,
69
	DRM_FORMAT_ABGR8888,
70
	DRM_FORMAT_XRGB2101010,
71
	DRM_FORMAT_ARGB2101010,
72
	DRM_FORMAT_XBGR2101010,
73
	DRM_FORMAT_ABGR2101010,
74
};
75
 
76
/* Cursor formats */
77
static const uint32_t intel_cursor_formats[] = {
78
	DRM_FORMAT_ARGB8888,
79
};
80
 
81
#define DIV_ROUND_CLOSEST_ULL(ll, d)	\
82
({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
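/*
 * Round-to-nearest 64-bit division: adding d/2 before the truncating
 * do_div() rounds halves upward.  For example, DIV_ROUND_CLOSEST_ULL(10, 4)
 * evaluates (10 + 2) / 4 = 3, while DIV_ROUND_CLOSEST_ULL(9, 4) gives
 * (9 + 2) / 4 = 2.
 */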
83
 
4104 Serge 84
#define MAX_ERRNO       4095
2327 Serge 85
phys_addr_t get_bus_addr(void);
86
 
4560 Serge 87
static inline void outb(u8 v, u16 port)
88
{
89
    asm volatile("outb %0,%1" : : "a" (v), "dN" (port));
90
}
91
static inline u8 inb(u16 port)
92
{
93
    u8 v;
94
    asm volatile("inb %1,%0" : "=a" (v) : "dN" (port));
95
    return v;
96
}
97
 
5060 serge 98
static void intel_increase_pllclock(struct drm_device *dev,
99
				    enum pipe pipe);
100
void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
2327 Serge 101
 
4104 Serge 102
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
103
				struct intel_crtc_config *pipe_config);
4560 Serge 104
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
4104 Serge 105
				    struct intel_crtc_config *pipe_config);
2327 Serge 106
 
4104 Serge 107
static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
108
			  int x, int y, struct drm_framebuffer *old_fb);
5060 serge 109
static int intel_framebuffer_init(struct drm_device *dev,
110
				  struct intel_framebuffer *ifb,
111
				  struct drm_mode_fb_cmd2 *mode_cmd,
112
				  struct drm_i915_gem_object *obj);
113
static void intel_dp_set_m_n(struct intel_crtc *crtc);
114
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
115
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
116
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
117
					 struct intel_link_m_n *m_n);
118
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
119
static void haswell_set_pipeconf(struct drm_crtc *crtc);
120
static void intel_set_pipe_csc(struct drm_crtc *crtc);
121
static void vlv_prepare_pll(struct intel_crtc *crtc);
4104 Serge 122
 
5060 serge 123
static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
124
{
125
	if (!connector->mst_port)
126
		return connector->encoder;
127
	else
128
		return &connector->mst_port->mst_encoders[pipe]->base;
129
}
4104 Serge 130
 
2327 Serge 131
typedef struct {
132
    int min, max;
133
} intel_range_t;
134
 
135
typedef struct {
136
    int dot_limit;
137
    int p2_slow, p2_fast;
138
} intel_p2_t;
139
 
140
typedef struct intel_limit intel_limit_t;
141
struct intel_limit {
142
    intel_range_t   dot, vco, n, m, m1, m2, p, p1;
143
    intel_p2_t      p2;
144
};
145
 
3243 Serge 146
int
147
intel_pch_rawclk(struct drm_device *dev)
148
{
149
	struct drm_i915_private *dev_priv = dev->dev_private;
150
 
151
	WARN_ON(!HAS_PCH_SPLIT(dev));
152
 
153
	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
154
}
155
 
2327 Serge 156
static inline u32 /* units of 100MHz */
157
intel_fdi_link_freq(struct drm_device *dev)
158
{
159
	if (IS_GEN5(dev)) {
160
		struct drm_i915_private *dev_priv = dev->dev_private;
161
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
162
	} else
163
		return 27;
164
}
165
 
4104 Serge 166
static const intel_limit_t intel_limits_i8xx_dac = {
167
	.dot = { .min = 25000, .max = 350000 },
4560 Serge 168
	.vco = { .min = 908000, .max = 1512000 },
169
	.n = { .min = 2, .max = 16 },
4104 Serge 170
	.m = { .min = 96, .max = 140 },
171
	.m1 = { .min = 18, .max = 26 },
172
	.m2 = { .min = 6, .max = 16 },
173
	.p = { .min = 4, .max = 128 },
174
	.p1 = { .min = 2, .max = 33 },
175
	.p2 = { .dot_limit = 165000,
176
		.p2_slow = 4, .p2_fast = 2 },
177
};
178
 
2327 Serge 179
static const intel_limit_t intel_limits_i8xx_dvo = {
180
        .dot = { .min = 25000, .max = 350000 },
4560 Serge 181
	.vco = { .min = 908000, .max = 1512000 },
182
	.n = { .min = 2, .max = 16 },
2327 Serge 183
        .m = { .min = 96, .max = 140 },
184
        .m1 = { .min = 18, .max = 26 },
185
        .m2 = { .min = 6, .max = 16 },
186
        .p = { .min = 4, .max = 128 },
187
        .p1 = { .min = 2, .max = 33 },
188
	.p2 = { .dot_limit = 165000,
4104 Serge 189
		.p2_slow = 4, .p2_fast = 4 },
2327 Serge 190
};
191
 
192
static const intel_limit_t intel_limits_i8xx_lvds = {
193
        .dot = { .min = 25000, .max = 350000 },
4560 Serge 194
	.vco = { .min = 908000, .max = 1512000 },
195
	.n = { .min = 2, .max = 16 },
2327 Serge 196
        .m = { .min = 96, .max = 140 },
197
        .m1 = { .min = 18, .max = 26 },
198
        .m2 = { .min = 6, .max = 16 },
199
        .p = { .min = 4, .max = 128 },
200
        .p1 = { .min = 1, .max = 6 },
201
	.p2 = { .dot_limit = 165000,
202
		.p2_slow = 14, .p2_fast = 7 },
203
};
204
 
205
static const intel_limit_t intel_limits_i9xx_sdvo = {
206
        .dot = { .min = 20000, .max = 400000 },
207
        .vco = { .min = 1400000, .max = 2800000 },
208
        .n = { .min = 1, .max = 6 },
209
        .m = { .min = 70, .max = 120 },
3480 Serge 210
	.m1 = { .min = 8, .max = 18 },
211
	.m2 = { .min = 3, .max = 7 },
2327 Serge 212
        .p = { .min = 5, .max = 80 },
213
        .p1 = { .min = 1, .max = 8 },
214
	.p2 = { .dot_limit = 200000,
215
		.p2_slow = 10, .p2_fast = 5 },
216
};
217
 
218
static const intel_limit_t intel_limits_i9xx_lvds = {
219
        .dot = { .min = 20000, .max = 400000 },
220
        .vco = { .min = 1400000, .max = 2800000 },
221
        .n = { .min = 1, .max = 6 },
222
        .m = { .min = 70, .max = 120 },
3480 Serge 223
	.m1 = { .min = 8, .max = 18 },
224
	.m2 = { .min = 3, .max = 7 },
2327 Serge 225
        .p = { .min = 7, .max = 98 },
226
        .p1 = { .min = 1, .max = 8 },
227
	.p2 = { .dot_limit = 112000,
228
		.p2_slow = 14, .p2_fast = 7 },
229
};
230
 
231
 
232
static const intel_limit_t intel_limits_g4x_sdvo = {
233
	.dot = { .min = 25000, .max = 270000 },
234
	.vco = { .min = 1750000, .max = 3500000},
235
	.n = { .min = 1, .max = 4 },
236
	.m = { .min = 104, .max = 138 },
237
	.m1 = { .min = 17, .max = 23 },
238
	.m2 = { .min = 5, .max = 11 },
239
	.p = { .min = 10, .max = 30 },
240
	.p1 = { .min = 1, .max = 3},
241
	.p2 = { .dot_limit = 270000,
242
		.p2_slow = 10,
243
		.p2_fast = 10
244
	},
245
};
246
 
247
static const intel_limit_t intel_limits_g4x_hdmi = {
248
	.dot = { .min = 22000, .max = 400000 },
249
	.vco = { .min = 1750000, .max = 3500000},
250
	.n = { .min = 1, .max = 4 },
251
	.m = { .min = 104, .max = 138 },
252
	.m1 = { .min = 16, .max = 23 },
253
	.m2 = { .min = 5, .max = 11 },
254
	.p = { .min = 5, .max = 80 },
255
	.p1 = { .min = 1, .max = 8},
256
	.p2 = { .dot_limit = 165000,
257
		.p2_slow = 10, .p2_fast = 5 },
258
};
259
 
260
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
261
	.dot = { .min = 20000, .max = 115000 },
262
	.vco = { .min = 1750000, .max = 3500000 },
263
	.n = { .min = 1, .max = 3 },
264
	.m = { .min = 104, .max = 138 },
265
	.m1 = { .min = 17, .max = 23 },
266
	.m2 = { .min = 5, .max = 11 },
267
	.p = { .min = 28, .max = 112 },
268
	.p1 = { .min = 2, .max = 8 },
269
	.p2 = { .dot_limit = 0,
270
		.p2_slow = 14, .p2_fast = 14
271
	},
272
};
273
 
274
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
275
	.dot = { .min = 80000, .max = 224000 },
276
	.vco = { .min = 1750000, .max = 3500000 },
277
	.n = { .min = 1, .max = 3 },
278
	.m = { .min = 104, .max = 138 },
279
	.m1 = { .min = 17, .max = 23 },
280
	.m2 = { .min = 5, .max = 11 },
281
	.p = { .min = 14, .max = 42 },
282
	.p1 = { .min = 2, .max = 6 },
283
	.p2 = { .dot_limit = 0,
284
		.p2_slow = 7, .p2_fast = 7
285
	},
286
};
287
 
288
static const intel_limit_t intel_limits_pineview_sdvo = {
289
        .dot = { .min = 20000, .max = 400000},
290
        .vco = { .min = 1700000, .max = 3500000 },
291
	/* Pineview's Ncounter is a ring counter */
292
        .n = { .min = 3, .max = 6 },
293
        .m = { .min = 2, .max = 256 },
294
	/* Pineview only has one combined m divider, which we treat as m2. */
295
        .m1 = { .min = 0, .max = 0 },
296
        .m2 = { .min = 0, .max = 254 },
297
        .p = { .min = 5, .max = 80 },
298
        .p1 = { .min = 1, .max = 8 },
299
	.p2 = { .dot_limit = 200000,
300
		.p2_slow = 10, .p2_fast = 5 },
301
};
302
 
303
static const intel_limit_t intel_limits_pineview_lvds = {
304
        .dot = { .min = 20000, .max = 400000 },
305
        .vco = { .min = 1700000, .max = 3500000 },
306
        .n = { .min = 3, .max = 6 },
307
        .m = { .min = 2, .max = 256 },
308
        .m1 = { .min = 0, .max = 0 },
309
        .m2 = { .min = 0, .max = 254 },
310
        .p = { .min = 7, .max = 112 },
311
        .p1 = { .min = 1, .max = 8 },
312
	.p2 = { .dot_limit = 112000,
313
		.p2_slow = 14, .p2_fast = 14 },
314
};
315
 
316
/* Ironlake / Sandybridge
317
 *
318
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
319
 * the range value for them is (actual_value - 2).
320
 */
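/*
 * For example, intel_limits_ironlake_dac below has .m1 = { .min = 12,
 * .max = 22 }; with the (register_value + 2) convention this corresponds
 * to actual M1 divider values of 14..24.
 */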
321
static const intel_limit_t intel_limits_ironlake_dac = {
322
	.dot = { .min = 25000, .max = 350000 },
323
	.vco = { .min = 1760000, .max = 3510000 },
324
	.n = { .min = 1, .max = 5 },
325
	.m = { .min = 79, .max = 127 },
326
	.m1 = { .min = 12, .max = 22 },
327
	.m2 = { .min = 5, .max = 9 },
328
	.p = { .min = 5, .max = 80 },
329
	.p1 = { .min = 1, .max = 8 },
330
	.p2 = { .dot_limit = 225000,
331
		.p2_slow = 10, .p2_fast = 5 },
332
};
333
 
334
static const intel_limit_t intel_limits_ironlake_single_lvds = {
335
	.dot = { .min = 25000, .max = 350000 },
336
	.vco = { .min = 1760000, .max = 3510000 },
337
	.n = { .min = 1, .max = 3 },
338
	.m = { .min = 79, .max = 118 },
339
	.m1 = { .min = 12, .max = 22 },
340
	.m2 = { .min = 5, .max = 9 },
341
	.p = { .min = 28, .max = 112 },
342
	.p1 = { .min = 2, .max = 8 },
343
	.p2 = { .dot_limit = 225000,
344
		.p2_slow = 14, .p2_fast = 14 },
345
};
346
 
347
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
348
	.dot = { .min = 25000, .max = 350000 },
349
	.vco = { .min = 1760000, .max = 3510000 },
350
	.n = { .min = 1, .max = 3 },
351
	.m = { .min = 79, .max = 127 },
352
	.m1 = { .min = 12, .max = 22 },
353
	.m2 = { .min = 5, .max = 9 },
354
	.p = { .min = 14, .max = 56 },
355
	.p1 = { .min = 2, .max = 8 },
356
	.p2 = { .dot_limit = 225000,
357
		.p2_slow = 7, .p2_fast = 7 },
358
};
359
 
360
/* LVDS 100MHz refclk limits. */
361
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
362
	.dot = { .min = 25000, .max = 350000 },
363
	.vco = { .min = 1760000, .max = 3510000 },
364
	.n = { .min = 1, .max = 2 },
365
	.m = { .min = 79, .max = 126 },
366
	.m1 = { .min = 12, .max = 22 },
367
	.m2 = { .min = 5, .max = 9 },
368
	.p = { .min = 28, .max = 112 },
2342 Serge 369
	.p1 = { .min = 2, .max = 8 },
2327 Serge 370
	.p2 = { .dot_limit = 225000,
371
		.p2_slow = 14, .p2_fast = 14 },
372
};
373
 
374
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
375
	.dot = { .min = 25000, .max = 350000 },
376
	.vco = { .min = 1760000, .max = 3510000 },
377
	.n = { .min = 1, .max = 3 },
378
	.m = { .min = 79, .max = 126 },
379
	.m1 = { .min = 12, .max = 22 },
380
	.m2 = { .min = 5, .max = 9 },
381
	.p = { .min = 14, .max = 42 },
2342 Serge 382
	.p1 = { .min = 2, .max = 6 },
2327 Serge 383
	.p2 = { .dot_limit = 225000,
384
		.p2_slow = 7, .p2_fast = 7 },
385
};
386
 
4560 Serge 387
static const intel_limit_t intel_limits_vlv = {
388
	 /*
389
	  * These are the data rate limits (measured in fast clocks)
390
	  * since those are the strictest limits we have. The fast
391
	  * clock and actual rate limits are more relaxed, so checking
392
	  * them would make no difference.
393
	  */
394
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
3031 serge 395
	.vco = { .min = 4000000, .max = 6000000 },
396
	.n = { .min = 1, .max = 7 },
397
	.m1 = { .min = 2, .max = 3 },
398
	.m2 = { .min = 11, .max = 156 },
399
	.p1 = { .min = 2, .max = 3 },
4560 Serge 400
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
3031 serge 401
};
402
 
5060 serge 403
static const intel_limit_t intel_limits_chv = {
404
	/*
405
	 * These are the data rate limits (measured in fast clocks)
406
	 * since those are the strictest limits we have.  The fast
407
	 * clock and actual rate limits are more relaxed, so checking
408
	 * them would make no difference.
409
	 */
410
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
411
	.vco = { .min = 4860000, .max = 6700000 },
412
	.n = { .min = 1, .max = 1 },
413
	.m1 = { .min = 2, .max = 2 },
414
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
415
	.p1 = { .min = 2, .max = 4 },
416
	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
417
};
418
 
4560 Serge 419
static void vlv_clock(int refclk, intel_clock_t *clock)
420
{
421
	clock->m = clock->m1 * clock->m2;
422
	clock->p = clock->p1 * clock->p2;
423
	if (WARN_ON(clock->n == 0 || clock->p == 0))
424
		return;
425
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
426
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
427
}
3031 serge 428
 
4560 Serge 429
/**
430
 * Returns whether any output on the specified pipe is of the specified type
431
 */
432
static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
433
{
434
	struct drm_device *dev = crtc->dev;
435
	struct intel_encoder *encoder;
436
 
437
	for_each_encoder_on_crtc(dev, crtc, encoder)
438
		if (encoder->type == type)
439
			return true;
440
 
441
	return false;
442
}
443
 
2327 Serge 444
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
445
						int refclk)
446
{
447
	struct drm_device *dev = crtc->dev;
448
	const intel_limit_t *limit;
449
 
450
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
3480 Serge 451
		if (intel_is_dual_link_lvds(dev)) {
2327 Serge 452
			if (refclk == 100000)
453
				limit = &intel_limits_ironlake_dual_lvds_100m;
454
			else
455
				limit = &intel_limits_ironlake_dual_lvds;
456
		} else {
457
			if (refclk == 100000)
458
				limit = &intel_limits_ironlake_single_lvds_100m;
459
			else
460
				limit = &intel_limits_ironlake_single_lvds;
461
		}
4104 Serge 462
	} else
2327 Serge 463
		limit = &intel_limits_ironlake_dac;
464
 
465
	return limit;
466
}
467
 
468
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
469
{
470
	struct drm_device *dev = crtc->dev;
471
	const intel_limit_t *limit;
472
 
473
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
3480 Serge 474
		if (intel_is_dual_link_lvds(dev))
2327 Serge 475
			limit = &intel_limits_g4x_dual_channel_lvds;
476
		else
477
			limit = &intel_limits_g4x_single_channel_lvds;
478
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
479
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
480
		limit = &intel_limits_g4x_hdmi;
481
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
482
		limit = &intel_limits_g4x_sdvo;
483
	} else /* The option is for other outputs */
484
		limit = &intel_limits_i9xx_sdvo;
485
 
486
	return limit;
487
}
488
 
489
static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
490
{
491
	struct drm_device *dev = crtc->dev;
492
	const intel_limit_t *limit;
493
 
494
	if (HAS_PCH_SPLIT(dev))
495
		limit = intel_ironlake_limit(crtc, refclk);
496
	else if (IS_G4X(dev)) {
497
		limit = intel_g4x_limit(crtc);
498
	} else if (IS_PINEVIEW(dev)) {
499
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
500
			limit = &intel_limits_pineview_lvds;
501
		else
502
			limit = &intel_limits_pineview_sdvo;
5060 serge 503
	} else if (IS_CHERRYVIEW(dev)) {
504
		limit = &intel_limits_chv;
3031 serge 505
	} else if (IS_VALLEYVIEW(dev)) {
4560 Serge 506
		limit = &intel_limits_vlv;
2327 Serge 507
	} else if (!IS_GEN2(dev)) {
508
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
509
			limit = &intel_limits_i9xx_lvds;
510
		else
511
			limit = &intel_limits_i9xx_sdvo;
512
	} else {
513
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
514
			limit = &intel_limits_i8xx_lvds;
4104 Serge 515
		else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
516
			limit = &intel_limits_i8xx_dvo;
2327 Serge 517
		else
4104 Serge 518
			limit = &intel_limits_i8xx_dac;
2327 Serge 519
	}
520
	return limit;
521
}
522
 
523
/* m1 is reserved as 0 in Pineview, n is a ring counter */
524
static void pineview_clock(int refclk, intel_clock_t *clock)
525
{
526
	clock->m = clock->m2 + 2;
527
	clock->p = clock->p1 * clock->p2;
4560 Serge 528
	if (WARN_ON(clock->n == 0 || clock->p == 0))
529
		return;
530
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
531
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
2327 Serge 532
}
533
 
4104 Serge 534
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
2327 Serge 535
{
4104 Serge 536
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
537
}
538
 
539
static void i9xx_clock(int refclk, intel_clock_t *clock)
540
{
541
	clock->m = i9xx_dpll_compute_m(clock);
2327 Serge 542
	clock->p = clock->p1 * clock->p2;
4560 Serge 543
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
544
		return;
545
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
546
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
2327 Serge 547
}
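/*
 * Worked example with illustrative values (refclk and dot in kHz, as in
 * the limit tables above): refclk = 96000, m1 = 12, m2 = 6, n = 2,
 * p1 = 2, p2 = 5 gives
 *   m   = 5 * (12 + 2) + (6 + 2) = 78
 *   vco = 96000 * 78 / (2 + 2)   = 1872000
 *   dot = 1872000 / (2 * 5)      = 187200
 */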
548
 
5060 serge 549
static void chv_clock(int refclk, intel_clock_t *clock)
550
{
551
	clock->m = clock->m1 * clock->m2;
552
	clock->p = clock->p1 * clock->p2;
553
	if (WARN_ON(clock->n == 0 || clock->p == 0))
554
		return;
555
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
556
			clock->n << 22);
557
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
558
}
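/*
 * On CHV the m2 divider is carried as a fixed-point value with 22
 * fractional bits (note the "<< 22" in intel_limits_chv and in
 * chv_find_best_dpll below), which is why the VCO calculation above
 * divides by (n << 22) rather than by n.
 */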
559
 
2327 Serge 560
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
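/*
 * Note that INTELPllInvalid() expands to a "return false" inside the
 * function that uses it (intel_PLL_is_valid() below), so each failed
 * range check bails out of the validity test immediately.
 */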
561
/**
562
 * Returns whether the given set of divisors are valid for a given refclk with
563
 * the given connectors.
564
 */
565
 
566
static bool intel_PLL_is_valid(struct drm_device *dev,
567
			       const intel_limit_t *limit,
568
			       const intel_clock_t *clock)
569
{
4560 Serge 570
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
571
		INTELPllInvalid("n out of range\n");
2327 Serge 572
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
2342 Serge 573
		INTELPllInvalid("p1 out of range\n");
2327 Serge 574
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
2342 Serge 575
		INTELPllInvalid("m2 out of range\n");
2327 Serge 576
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
2342 Serge 577
		INTELPllInvalid("m1 out of range\n");
4560 Serge 578
 
579
	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
580
		if (clock->m1 <= clock->m2)
2342 Serge 581
		INTELPllInvalid("m1 <= m2\n");
4560 Serge 582
 
583
	if (!IS_VALLEYVIEW(dev)) {
584
		if (clock->p < limit->p.min || limit->p.max < clock->p)
585
			INTELPllInvalid("p out of range\n");
2327 Serge 586
	if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
2342 Serge 587
		INTELPllInvalid("m out of range\n");
4560 Serge 588
	}
589
 
2327 Serge 590
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
2342 Serge 591
		INTELPllInvalid("vco out of range\n");
2327 Serge 592
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
593
	 * connector, etc., rather than just a single range.
594
	 */
595
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
2342 Serge 596
		INTELPllInvalid("dot out of range\n");
2327 Serge 597
 
598
	return true;
599
}
600
 
601
static bool
4104 Serge 602
i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
3031 serge 603
		    int target, int refclk, intel_clock_t *match_clock,
604
		    intel_clock_t *best_clock)
2327 Serge 605
{
606
	struct drm_device *dev = crtc->dev;
607
	intel_clock_t clock;
608
	int err = target;
609
 
3480 Serge 610
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
2327 Serge 611
		/*
3480 Serge 612
		 * For LVDS just rely on its current settings for dual-channel.
613
		 * We haven't figured out how to reliably set up different
614
		 * single/dual channel state, if we even can.
2327 Serge 615
		 */
3480 Serge 616
		if (intel_is_dual_link_lvds(dev))
2327 Serge 617
			clock.p2 = limit->p2.p2_fast;
618
		else
619
			clock.p2 = limit->p2.p2_slow;
620
	} else {
621
		if (target < limit->p2.dot_limit)
622
			clock.p2 = limit->p2.p2_slow;
623
		else
624
			clock.p2 = limit->p2.p2_fast;
625
	}
626
 
2342 Serge 627
	memset(best_clock, 0, sizeof(*best_clock));
2327 Serge 628
 
629
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
630
	     clock.m1++) {
631
		for (clock.m2 = limit->m2.min;
632
		     clock.m2 <= limit->m2.max; clock.m2++) {
4104 Serge 633
			if (clock.m2 >= clock.m1)
2327 Serge 634
				break;
635
			for (clock.n = limit->n.min;
636
			     clock.n <= limit->n.max; clock.n++) {
637
				for (clock.p1 = limit->p1.min;
638
					clock.p1 <= limit->p1.max; clock.p1++) {
639
					int this_err;
640
 
4104 Serge 641
					i9xx_clock(refclk, &clock);
2327 Serge 642
					if (!intel_PLL_is_valid(dev, limit,
643
								&clock))
644
						continue;
3031 serge 645
					if (match_clock &&
646
					    clock.p != match_clock->p)
647
						continue;
2327 Serge 648
 
649
					this_err = abs(clock.dot - target);
650
					if (this_err < err) {
651
						*best_clock = clock;
652
						err = this_err;
653
					}
654
				}
655
			}
656
		}
657
	}
658
 
659
	return (err != target);
660
}
661
 
662
static bool
4104 Serge 663
pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
664
		   int target, int refclk, intel_clock_t *match_clock,
665
		   intel_clock_t *best_clock)
666
{
667
	struct drm_device *dev = crtc->dev;
668
	intel_clock_t clock;
669
	int err = target;
670
 
671
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
672
		/*
673
		 * For LVDS just rely on its current settings for dual-channel.
674
		 * We haven't figured out how to reliably set up different
675
		 * single/dual channel state, if we even can.
676
		 */
677
		if (intel_is_dual_link_lvds(dev))
678
			clock.p2 = limit->p2.p2_fast;
679
		else
680
			clock.p2 = limit->p2.p2_slow;
681
	} else {
682
		if (target < limit->p2.dot_limit)
683
			clock.p2 = limit->p2.p2_slow;
684
		else
685
			clock.p2 = limit->p2.p2_fast;
686
	}
687
 
688
	memset(best_clock, 0, sizeof(*best_clock));
689
 
690
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
691
	     clock.m1++) {
692
		for (clock.m2 = limit->m2.min;
693
		     clock.m2 <= limit->m2.max; clock.m2++) {
694
			for (clock.n = limit->n.min;
695
			     clock.n <= limit->n.max; clock.n++) {
696
				for (clock.p1 = limit->p1.min;
697
					clock.p1 <= limit->p1.max; clock.p1++) {
698
					int this_err;
699
 
700
					pineview_clock(refclk, &clock);
701
					if (!intel_PLL_is_valid(dev, limit,
702
								&clock))
703
						continue;
704
					if (match_clock &&
705
					    clock.p != match_clock->p)
706
						continue;
707
 
708
					this_err = abs(clock.dot - target);
709
					if (this_err < err) {
710
						*best_clock = clock;
711
						err = this_err;
712
					}
713
				}
714
			}
715
		}
716
	}
717
 
718
	return (err != target);
719
}
720
 
721
static bool
722
g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
3031 serge 723
			int target, int refclk, intel_clock_t *match_clock,
724
			intel_clock_t *best_clock)
2327 Serge 725
{
726
	struct drm_device *dev = crtc->dev;
727
	intel_clock_t clock;
728
	int max_n;
729
	bool found;
730
	/* approximately equals target * 0.00585 */
731
	int err_most = (target >> 8) + (target >> 9);
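	/* i.e. target/256 + target/512 = target * 3/512 ~= target * 0.00586 */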
732
	found = false;
733
 
734
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
3480 Serge 735
		if (intel_is_dual_link_lvds(dev))
2327 Serge 736
			clock.p2 = limit->p2.p2_fast;
737
		else
738
			clock.p2 = limit->p2.p2_slow;
739
	} else {
740
		if (target < limit->p2.dot_limit)
741
			clock.p2 = limit->p2.p2_slow;
742
		else
743
			clock.p2 = limit->p2.p2_fast;
744
	}
745
 
746
	memset(best_clock, 0, sizeof(*best_clock));
747
	max_n = limit->n.max;
748
	/* based on hardware requirement, prefer smaller n to precision */
749
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
750
		/* based on hardware requirement, prefer larger m1,m2 */
751
		for (clock.m1 = limit->m1.max;
752
		     clock.m1 >= limit->m1.min; clock.m1--) {
753
			for (clock.m2 = limit->m2.max;
754
			     clock.m2 >= limit->m2.min; clock.m2--) {
755
				for (clock.p1 = limit->p1.max;
756
				     clock.p1 >= limit->p1.min; clock.p1--) {
757
					int this_err;
758
 
4104 Serge 759
					i9xx_clock(refclk, &clock);
2327 Serge 760
					if (!intel_PLL_is_valid(dev, limit,
761
								&clock))
762
						continue;
763
 
764
					this_err = abs(clock.dot - target);
765
					if (this_err < err_most) {
766
						*best_clock = clock;
767
						err_most = this_err;
768
						max_n = clock.n;
769
						found = true;
770
					}
771
				}
772
			}
773
		}
774
	}
775
	return found;
776
}
777
 
778
static bool
4104 Serge 779
vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
3031 serge 780
			int target, int refclk, intel_clock_t *match_clock,
781
			intel_clock_t *best_clock)
782
{
4560 Serge 783
	struct drm_device *dev = crtc->dev;
784
	intel_clock_t clock;
785
	unsigned int bestppm = 1000000;
786
	/* min update 19.2 MHz */
787
	int max_n = min(limit->n.max, refclk / 19200);
788
	bool found = false;
2327 Serge 789
 
4560 Serge 790
	target *= 5; /* fast clock */
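	/* target is a pixel clock; the x5 here puts it in fast-clock units,
	 * matching the x5 scaling of the dot limits in intel_limits_vlv. */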
3031 serge 791
 
4560 Serge 792
	memset(best_clock, 0, sizeof(*best_clock));
793
 
3031 serge 794
	/* based on hardware requirement, prefer smaller n to precision */
4560 Serge 795
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
796
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
797
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
798
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
799
				clock.p = clock.p1 * clock.p2;
3031 serge 800
				/* based on hardware requirement, prefer bigger m1,m2 values */
4560 Serge 801
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
802
					unsigned int ppm, diff;
803
 
804
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
805
								     refclk * clock.m1);
806
 
807
					vlv_clock(refclk, &clock);
808
 
809
					if (!intel_PLL_is_valid(dev, limit,
810
								&clock))
811
						continue;
812
 
813
					diff = abs(clock.dot - target);
814
					ppm = div_u64(1000000ULL * diff, target);
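					/* ppm is the deviation from the requested clock in parts
					 * per million; below, candidates under 100 ppm prefer a
					 * larger p (post divider), otherwise a candidate that
					 * improves bestppm by more than 10 ppm wins. */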
815
 
816
					if (ppm < 100 && clock.p > best_clock->p) {
3031 serge 817
							bestppm = 0;
4560 Serge 818
						*best_clock = clock;
819
						found = true;
3031 serge 820
						}
4560 Serge 821
 
822
					if (bestppm >= 10 && ppm < bestppm - 10) {
823
						bestppm = ppm;
824
						*best_clock = clock;
825
						found = true;
3031 serge 826
						}
827
						}
828
					}
829
				}
830
			}
831
 
4560 Serge 832
	return found;
3031 serge 833
}
834
 
5060 serge 835
static bool
836
chv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
837
		   int target, int refclk, intel_clock_t *match_clock,
838
		   intel_clock_t *best_clock)
839
{
840
	struct drm_device *dev = crtc->dev;
841
	intel_clock_t clock;
842
	uint64_t m2;
843
	int found = false;
844
 
845
	memset(best_clock, 0, sizeof(*best_clock));
846
 
847
	/*
848
	 * Based on the hardware doc, n is always set to 1 and m1 is always
849
	 * set to 2.  If we ever need to support a 200 MHz refclk, we will
850
	 * have to revisit this because n may no longer be 1.
851
	 */
852
	clock.n = 1, clock.m1 = 2;
853
	target *= 5;	/* fast clock */
854
 
855
	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
856
		for (clock.p2 = limit->p2.p2_fast;
857
				clock.p2 >= limit->p2.p2_slow;
858
				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
859
 
860
			clock.p = clock.p1 * clock.p2;
861
 
862
			m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
863
					clock.n) << 22, refclk * clock.m1);
864
 
865
			if (m2 > INT_MAX/clock.m1)
866
				continue;
867
 
868
			clock.m2 = m2;
869
 
870
			chv_clock(refclk, &clock);
871
 
872
			if (!intel_PLL_is_valid(dev, limit, &clock))
873
				continue;
874
 
875
			/* based on hardware requirement, prefer bigger p
876
			 */
877
			if (clock.p > best_clock->p) {
878
				*best_clock = clock;
879
				found = true;
880
			}
881
		}
882
	}
883
 
884
	return found;
885
}
886
 
4560 Serge 887
bool intel_crtc_active(struct drm_crtc *crtc)
888
{
889
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
890
 
891
	/* Be paranoid as we can arrive here with only partial
892
	 * state retrieved from the hardware during setup.
893
	 *
894
	 * We can ditch the adjusted_mode.crtc_clock check as soon
895
	 * as Haswell has gained clock readout/fastboot support.
896
	 *
5060 serge 897
	 * We can ditch the crtc->primary->fb check as soon as we can
4560 Serge 898
	 * properly reconstruct framebuffers.
899
	 */
5060 serge 900
	return intel_crtc->active && crtc->primary->fb &&
4560 Serge 901
		intel_crtc->config.adjusted_mode.crtc_clock;
902
}
903
 
3243 Serge 904
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
905
					     enum pipe pipe)
906
{
907
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
908
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
909
 
3746 Serge 910
	return intel_crtc->config.cpu_transcoder;
3243 Serge 911
}
912
 
4560 Serge 913
static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
3031 serge 914
{
915
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 916
	u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);
3031 serge 917
 
918
	frame = I915_READ(frame_reg);
919
 
920
	if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
5060 serge 921
		WARN(1, "vblank wait timed out\n");
3031 serge 922
}
923
 
2327 Serge 924
/**
925
 * intel_wait_for_vblank - wait for vblank on a given pipe
926
 * @dev: drm device
927
 * @pipe: pipe to wait for
928
 *
929
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
930
 * mode setting code.
931
 */
932
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
933
{
934
	struct drm_i915_private *dev_priv = dev->dev_private;
935
	int pipestat_reg = PIPESTAT(pipe);
936
 
4560 Serge 937
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
938
		g4x_wait_for_vblank(dev, pipe);
3031 serge 939
		return;
940
	}
941
 
2327 Serge 942
	/* Clear existing vblank status. Note this will clear any other
943
	 * sticky status fields as well.
944
	 *
945
	 * This races with i915_driver_irq_handler() with the result
946
	 * that either function could miss a vblank event.  Here it is not
947
	 * fatal, as we will either wait upon the next vblank interrupt or
948
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
949
	 * called during modeset at which time the GPU should be idle and
950
	 * should *not* be performing page flips and thus not waiting on
951
	 * vblanks...
952
	 * Currently, the result of us stealing a vblank from the irq
953
	 * handler is that a single frame will be skipped during swapbuffers.
954
	 */
955
	I915_WRITE(pipestat_reg,
956
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
957
 
958
	/* Wait for vblank interrupt bit to set */
959
	if (wait_for(I915_READ(pipestat_reg) &
960
		     PIPE_VBLANK_INTERRUPT_STATUS,
961
		     50))
962
		DRM_DEBUG_KMS("vblank wait timed out\n");
963
}
964
 
4560 Serge 965
static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
966
{
967
	struct drm_i915_private *dev_priv = dev->dev_private;
968
	u32 reg = PIPEDSL(pipe);
969
	u32 line1, line2;
970
	u32 line_mask;
971
 
972
	if (IS_GEN2(dev))
973
		line_mask = DSL_LINEMASK_GEN2;
974
	else
975
		line_mask = DSL_LINEMASK_GEN3;
976
 
977
	line1 = I915_READ(reg) & line_mask;
978
	mdelay(5);
979
	line2 = I915_READ(reg) & line_mask;
980
 
981
	return line1 == line2;
982
}
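/*
 * pipe_dsl_stopped() samples the pipe's display line counter twice, 5 ms
 * apart; if the value has not changed, the pipe is no longer scanning out.
 */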
983
 
2327 Serge 984
/*
985
 * intel_wait_for_pipe_off - wait for pipe to turn off
986
 * @dev: drm device
987
 * @pipe: pipe to wait for
988
 *
989
 * After disabling a pipe, we can't wait for vblank in the usual way,
990
 * spinning on the vblank interrupt status bit, since we won't actually
991
 * see an interrupt when the pipe is disabled.
992
 *
993
 * On Gen4 and above:
994
 *   wait for the pipe register state bit to turn off
995
 *
996
 * Otherwise:
997
 *   wait for the display line value to settle (it usually
998
 *   ends up stopping at the start of the next frame).
999
 *
1000
 */
1001
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
1002
{
1003
	struct drm_i915_private *dev_priv = dev->dev_private;
3243 Serge 1004
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1005
								      pipe);
2327 Serge 1006
 
1007
	if (INTEL_INFO(dev)->gen >= 4) {
3243 Serge 1008
		int reg = PIPECONF(cpu_transcoder);
2327 Serge 1009
 
1010
		/* Wait for the Pipe State to go off */
1011
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
1012
			     100))
3031 serge 1013
			WARN(1, "pipe_off wait timed out\n");
2327 Serge 1014
	} else {
1015
		/* Wait for the display line to settle */
4560 Serge 1016
		if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
3031 serge 1017
			WARN(1, "pipe_off wait timed out\n");
2327 Serge 1018
	}
1019
}
1020
 
3480 Serge 1021
/*
1022
 * ibx_digital_port_connected - is the specified port connected?
1023
 * @dev_priv: i915 private structure
1024
 * @port: the port to test
1025
 *
1026
 * Returns true if @port is connected, false otherwise.
1027
 */
1028
bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
1029
				struct intel_digital_port *port)
1030
{
1031
	u32 bit;
1032
 
1033
	if (HAS_PCH_IBX(dev_priv->dev)) {
5060 serge 1034
		switch (port->port) {
3480 Serge 1035
		case PORT_B:
1036
			bit = SDE_PORTB_HOTPLUG;
1037
			break;
1038
		case PORT_C:
1039
			bit = SDE_PORTC_HOTPLUG;
1040
			break;
1041
		case PORT_D:
1042
			bit = SDE_PORTD_HOTPLUG;
1043
			break;
1044
		default:
1045
			return true;
1046
		}
1047
	} else {
5060 serge 1048
		switch (port->port) {
3480 Serge 1049
		case PORT_B:
1050
			bit = SDE_PORTB_HOTPLUG_CPT;
1051
			break;
1052
		case PORT_C:
1053
			bit = SDE_PORTC_HOTPLUG_CPT;
1054
			break;
1055
		case PORT_D:
1056
			bit = SDE_PORTD_HOTPLUG_CPT;
1057
			break;
1058
		default:
1059
			return true;
1060
		}
1061
	}
1062
 
1063
	return I915_READ(SDEISR) & bit;
1064
}
1065
 
2327 Serge 1066
static const char *state_string(bool enabled)
1067
{
1068
	return enabled ? "on" : "off";
1069
}
1070
 
1071
/* Only for pre-ILK configs */
4104 Serge 1072
void assert_pll(struct drm_i915_private *dev_priv,
2327 Serge 1073
		       enum pipe pipe, bool state)
1074
{
1075
	int reg;
1076
	u32 val;
1077
	bool cur_state;
1078
 
1079
	reg = DPLL(pipe);
1080
	val = I915_READ(reg);
1081
	cur_state = !!(val & DPLL_VCO_ENABLE);
1082
	WARN(cur_state != state,
1083
	     "PLL state assertion failure (expected %s, current %s)\n",
1084
	     state_string(state), state_string(cur_state));
1085
}
1086
 
4560 Serge 1087
/* XXX: the dsi pll is shared between MIPI DSI ports */
1088
static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1089
{
1090
	u32 val;
1091
	bool cur_state;
1092
 
1093
	mutex_lock(&dev_priv->dpio_lock);
1094
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1095
	mutex_unlock(&dev_priv->dpio_lock);
1096
 
1097
	cur_state = val & DSI_PLL_VCO_EN;
1098
	WARN(cur_state != state,
1099
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
1100
	     state_string(state), state_string(cur_state));
1101
}
1102
#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
1103
#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
1104
 
4104 Serge 1105
struct intel_shared_dpll *
1106
intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
1107
{
1108
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1109
 
1110
	if (crtc->config.shared_dpll < 0)
1111
		return NULL;
1112
 
1113
	return &dev_priv->shared_dplls[crtc->config.shared_dpll];
1114
}
1115
 
2327 Serge 1116
/* For ILK+ */
4104 Serge 1117
void assert_shared_dpll(struct drm_i915_private *dev_priv,
1118
			       struct intel_shared_dpll *pll,
3031 serge 1119
			   bool state)
2327 Serge 1120
{
1121
	bool cur_state;
4104 Serge 1122
	struct intel_dpll_hw_state hw_state;
2327 Serge 1123
 
3031 serge 1124
	if (WARN (!pll,
4104 Serge 1125
		  "asserting DPLL %s with no DPLL\n", state_string(state)))
3031 serge 1126
		return;
2342 Serge 1127
 
4104 Serge 1128
	cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
3031 serge 1129
	WARN(cur_state != state,
4104 Serge 1130
	     "%s assertion failure (expected %s, current %s)\n",
1131
	     pll->name, state_string(state), state_string(cur_state));
2327 Serge 1132
}
1133
 
1134
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1135
			  enum pipe pipe, bool state)
1136
{
1137
	int reg;
1138
	u32 val;
1139
	bool cur_state;
3243 Serge 1140
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1141
								      pipe);
2327 Serge 1142
 
3480 Serge 1143
	if (HAS_DDI(dev_priv->dev)) {
1144
		/* DDI does not have a specific FDI_TX register */
3243 Serge 1145
		reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
3031 serge 1146
		val = I915_READ(reg);
3243 Serge 1147
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
3031 serge 1148
	} else {
2327 Serge 1149
	reg = FDI_TX_CTL(pipe);
1150
	val = I915_READ(reg);
1151
	cur_state = !!(val & FDI_TX_ENABLE);
3031 serge 1152
	}
2327 Serge 1153
	WARN(cur_state != state,
1154
	     "FDI TX state assertion failure (expected %s, current %s)\n",
1155
	     state_string(state), state_string(cur_state));
1156
}
1157
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1158
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1159
 
1160
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1161
			  enum pipe pipe, bool state)
1162
{
1163
	int reg;
1164
	u32 val;
1165
	bool cur_state;
1166
 
1167
	reg = FDI_RX_CTL(pipe);
1168
	val = I915_READ(reg);
1169
	cur_state = !!(val & FDI_RX_ENABLE);
1170
	WARN(cur_state != state,
1171
	     "FDI RX state assertion failure (expected %s, current %s)\n",
1172
	     state_string(state), state_string(cur_state));
1173
}
1174
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1175
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1176
 
1177
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1178
				      enum pipe pipe)
1179
{
1180
	int reg;
1181
	u32 val;
1182
 
1183
	/* ILK FDI PLL is always enabled */
5060 serge 1184
	if (INTEL_INFO(dev_priv->dev)->gen == 5)
2327 Serge 1185
		return;
1186
 
3031 serge 1187
	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
3480 Serge 1188
	if (HAS_DDI(dev_priv->dev))
3031 serge 1189
		return;
1190
 
2327 Serge 1191
	reg = FDI_TX_CTL(pipe);
1192
	val = I915_READ(reg);
1193
	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1194
}
1195
 
4104 Serge 1196
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1197
		       enum pipe pipe, bool state)
2327 Serge 1198
{
1199
	int reg;
1200
	u32 val;
4104 Serge 1201
	bool cur_state;
2327 Serge 1202
 
1203
	reg = FDI_RX_CTL(pipe);
1204
	val = I915_READ(reg);
4104 Serge 1205
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
1206
	WARN(cur_state != state,
1207
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
1208
	     state_string(state), state_string(cur_state));
2327 Serge 1209
}
1210
 
1211
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1212
				  enum pipe pipe)
1213
{
1214
	int pp_reg, lvds_reg;
1215
	u32 val;
1216
	enum pipe panel_pipe = PIPE_A;
1217
	bool locked = true;
1218
 
1219
	if (HAS_PCH_SPLIT(dev_priv->dev)) {
1220
		pp_reg = PCH_PP_CONTROL;
1221
		lvds_reg = PCH_LVDS;
1222
	} else {
1223
		pp_reg = PP_CONTROL;
1224
		lvds_reg = LVDS;
1225
	}
1226
 
1227
	val = I915_READ(pp_reg);
1228
	if (!(val & PANEL_POWER_ON) ||
1229
	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
1230
		locked = false;
1231
 
1232
	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
1233
		panel_pipe = PIPE_B;
1234
 
1235
	WARN(panel_pipe == pipe && locked,
1236
	     "panel assertion failure, pipe %c regs locked\n",
1237
	     pipe_name(pipe));
1238
}
1239
 
4560 Serge 1240
static void assert_cursor(struct drm_i915_private *dev_priv,
1241
			  enum pipe pipe, bool state)
1242
{
1243
	struct drm_device *dev = dev_priv->dev;
1244
	bool cur_state;
1245
 
5060 serge 1246
	if (IS_845G(dev) || IS_I865G(dev))
4560 Serge 1247
		cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
1248
	else
1249
		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1250
 
1251
	WARN(cur_state != state,
1252
	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
1253
	     pipe_name(pipe), state_string(state), state_string(cur_state));
1254
}
1255
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1256
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1257
 
2342 Serge 1258
void assert_pipe(struct drm_i915_private *dev_priv,
2327 Serge 1259
			enum pipe pipe, bool state)
1260
{
1261
	int reg;
1262
	u32 val;
1263
	bool cur_state;
3243 Serge 1264
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1265
								      pipe);
2327 Serge 1266
 
3031 serge 1267
	/* if we need the pipe A quirk it must be always on */
1268
	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
1269
		state = true;
1270
 
5060 serge 1271
	if (!intel_display_power_enabled(dev_priv,
4104 Serge 1272
				POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
3480 Serge 1273
		cur_state = false;
1274
	} else {
3243 Serge 1275
	reg = PIPECONF(cpu_transcoder);
2327 Serge 1276
	val = I915_READ(reg);
1277
	cur_state = !!(val & PIPECONF_ENABLE);
3480 Serge 1278
	}
1279
 
2327 Serge 1280
	WARN(cur_state != state,
1281
	     "pipe %c assertion failure (expected %s, current %s)\n",
1282
	     pipe_name(pipe), state_string(state), state_string(cur_state));
1283
}
1284
 
3031 serge 1285
static void assert_plane(struct drm_i915_private *dev_priv,
1286
			 enum plane plane, bool state)
2327 Serge 1287
{
1288
	int reg;
1289
	u32 val;
3031 serge 1290
	bool cur_state;
2327 Serge 1291
 
1292
	reg = DSPCNTR(plane);
1293
	val = I915_READ(reg);
3031 serge 1294
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1295
	WARN(cur_state != state,
1296
	     "plane %c assertion failure (expected %s, current %s)\n",
1297
	     plane_name(plane), state_string(state), state_string(cur_state));
2327 Serge 1298
}
1299
 
3031 serge 1300
#define assert_plane_enabled(d, p) assert_plane(d, p, true)
1301
#define assert_plane_disabled(d, p) assert_plane(d, p, false)
1302
 
2327 Serge 1303
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1304
				   enum pipe pipe)
1305
{
4104 Serge 1306
	struct drm_device *dev = dev_priv->dev;
2327 Serge 1307
	int reg, i;
1308
	u32 val;
1309
	int cur_pipe;
1310
 
4104 Serge 1311
	/* Primary planes are fixed to pipes on gen4+ */
1312
	if (INTEL_INFO(dev)->gen >= 4) {
3031 serge 1313
		reg = DSPCNTR(pipe);
1314
		val = I915_READ(reg);
5060 serge 1315
		WARN(val & DISPLAY_PLANE_ENABLE,
3031 serge 1316
		     "plane %c assertion failure, should be disabled but not\n",
1317
		     plane_name(pipe));
2327 Serge 1318
		return;
3031 serge 1319
	}
2327 Serge 1320
 
1321
	/* Need to check both planes against the pipe */
4104 Serge 1322
	for_each_pipe(i) {
2327 Serge 1323
		reg = DSPCNTR(i);
1324
		val = I915_READ(reg);
1325
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1326
			DISPPLANE_SEL_PIPE_SHIFT;
1327
		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1328
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
1329
		     plane_name(i), pipe_name(pipe));
1330
	}
1331
}
1332
 
3746 Serge 1333
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1334
				    enum pipe pipe)
1335
{
4104 Serge 1336
	struct drm_device *dev = dev_priv->dev;
5060 serge 1337
	int reg, sprite;
3746 Serge 1338
	u32 val;
1339
 
4104 Serge 1340
	if (IS_VALLEYVIEW(dev)) {
5060 serge 1341
		for_each_sprite(pipe, sprite) {
1342
			reg = SPCNTR(pipe, sprite);
3746 Serge 1343
		val = I915_READ(reg);
5060 serge 1344
			WARN(val & SP_ENABLE,
4104 Serge 1345
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
5060 serge 1346
			     sprite_name(pipe, sprite), pipe_name(pipe));
4104 Serge 1347
		}
1348
	} else if (INTEL_INFO(dev)->gen >= 7) {
1349
		reg = SPRCTL(pipe);
1350
		val = I915_READ(reg);
5060 serge 1351
		WARN(val & SPRITE_ENABLE,
4104 Serge 1352
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1353
		     plane_name(pipe), pipe_name(pipe));
1354
	} else if (INTEL_INFO(dev)->gen >= 5) {
1355
		reg = DVSCNTR(pipe);
1356
		val = I915_READ(reg);
5060 serge 1357
		WARN(val & DVS_ENABLE,
4104 Serge 1358
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1359
		     plane_name(pipe), pipe_name(pipe));
3746 Serge 1360
	}
1361
}
1362
 
4560 Serge 1363
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
2327 Serge 1364
{
1365
	u32 val;
1366
	bool enabled;
1367
 
4560 Serge 1368
	WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
3031 serge 1369
 
2327 Serge 1370
	val = I915_READ(PCH_DREF_CONTROL);
1371
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1372
			    DREF_SUPERSPREAD_SOURCE_MASK));
1373
	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1374
}
1375
 
4104 Serge 1376
static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
2327 Serge 1377
				       enum pipe pipe)
1378
{
1379
	int reg;
1380
	u32 val;
1381
	bool enabled;
1382
 
4104 Serge 1383
	reg = PCH_TRANSCONF(pipe);
2327 Serge 1384
	val = I915_READ(reg);
1385
	enabled = !!(val & TRANS_ENABLE);
1386
	WARN(enabled,
1387
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
1388
	     pipe_name(pipe));
1389
}
1390
 
1391
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1392
			    enum pipe pipe, u32 port_sel, u32 val)
1393
{
1394
	if ((val & DP_PORT_EN) == 0)
1395
		return false;
1396
 
1397
	if (HAS_PCH_CPT(dev_priv->dev)) {
1398
		u32	trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1399
		u32	trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1400
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1401
			return false;
5060 serge 1402
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
1403
		if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
1404
			return false;
2327 Serge 1405
	} else {
1406
		if ((val & DP_PIPE_MASK) != (pipe << 30))
1407
			return false;
1408
	}
1409
	return true;
1410
}
1411
 
1412
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1413
			      enum pipe pipe, u32 val)
1414
{
3746 Serge 1415
	if ((val & SDVO_ENABLE) == 0)
2327 Serge 1416
		return false;
1417
 
1418
	if (HAS_PCH_CPT(dev_priv->dev)) {
3746 Serge 1419
		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
2327 Serge 1420
			return false;
5060 serge 1421
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
1422
		if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
1423
			return false;
2327 Serge 1424
	} else {
3746 Serge 1425
		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
2327 Serge 1426
			return false;
1427
	}
1428
	return true;
1429
}
1430
 
1431
static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1432
			      enum pipe pipe, u32 val)
1433
{
1434
	if ((val & LVDS_PORT_EN) == 0)
1435
		return false;
1436
 
1437
	if (HAS_PCH_CPT(dev_priv->dev)) {
1438
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1439
			return false;
1440
	} else {
1441
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1442
			return false;
1443
	}
1444
	return true;
1445
}
1446
 
1447
static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1448
			      enum pipe pipe, u32 val)
1449
{
1450
	if ((val & ADPA_DAC_ENABLE) == 0)
1451
		return false;
1452
	if (HAS_PCH_CPT(dev_priv->dev)) {
1453
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1454
			return false;
1455
	} else {
1456
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1457
			return false;
1458
	}
1459
	return true;
1460
}
1461
 
1462
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1463
				   enum pipe pipe, int reg, u32 port_sel)
1464
{
1465
	u32 val = I915_READ(reg);
1466
	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1467
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1468
	     reg, pipe_name(pipe));
3031 serge 1469
 
1470
	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
1471
	     && (val & DP_PIPEB_SELECT),
1472
	     "IBX PCH dp port still using transcoder B\n");
2327 Serge 1473
}
1474
 
1475
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1476
				     enum pipe pipe, int reg)
1477
{
1478
	u32 val = I915_READ(reg);
3031 serge 1479
	WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1480
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
2327 Serge 1481
	     reg, pipe_name(pipe));
3031 serge 1482
 
3746 Serge 1483
	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
3031 serge 1484
	     && (val & SDVO_PIPE_B_SELECT),
1485
	     "IBX PCH hdmi port still using transcoder B\n");
2327 Serge 1486
}
1487
 
1488
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1489
				      enum pipe pipe)
1490
{
1491
	int reg;
1492
	u32 val;
1493
 
1494
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1495
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1496
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1497
 
1498
	reg = PCH_ADPA;
1499
	val = I915_READ(reg);
3031 serge 1500
	WARN(adpa_pipe_enabled(dev_priv, pipe, val),
2327 Serge 1501
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
1502
	     pipe_name(pipe));
1503
 
1504
	reg = PCH_LVDS;
1505
	val = I915_READ(reg);
3031 serge 1506
	WARN(lvds_pipe_enabled(dev_priv, pipe, val),
2327 Serge 1507
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
1508
	     pipe_name(pipe));
1509
 
3746 Serge 1510
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1511
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1512
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
2327 Serge 1513
}
1514
 
4560 Serge 1515
static void intel_init_dpio(struct drm_device *dev)
1516
{
1517
	struct drm_i915_private *dev_priv = dev->dev_private;
1518
 
1519
	if (!IS_VALLEYVIEW(dev))
1520
		return;
1521
 
5060 serge 1522
	/*
1523
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
1524
	 * CHV x1 PHY (DP/HDMI D)
1525
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
1526
	 */
1527
	if (IS_CHERRYVIEW(dev)) {
1528
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
1529
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
1530
	} else {
4560 Serge 1531
	DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
5060 serge 1532
	}
4560 Serge 1533
}
1534
 
1535
static void intel_reset_dpio(struct drm_device *dev)
1536
{
1537
	struct drm_i915_private *dev_priv = dev->dev_private;
1538
 
5060 serge 1539
	if (IS_CHERRYVIEW(dev)) {
1540
		enum dpio_phy phy;
1541
		u32 val;
4560 Serge 1542
 
5060 serge 1543
		for (phy = DPIO_PHY0; phy < I915_NUM_PHYS_VLV; phy++) {
1544
			/* Poll for phypwrgood signal */
1545
			if (wait_for(I915_READ(DISPLAY_PHY_STATUS) &
1546
						PHY_POWERGOOD(phy), 1))
1547
				DRM_ERROR("Display PHY %d is not power up\n", phy);
4560 Serge 1548
 
1549
	/*
5060 serge 1550
			 * Deassert common lane reset for PHY.
1551
			 *
1552
			 * This should only be done on init and resume from S3
1553
			 * with both PLLs disabled, or we risk losing DPIO and
1554
			 * PLL synchronization.
1555
			 */
1556
			val = I915_READ(DISPLAY_PHY_CONTROL);
1557
			I915_WRITE(DISPLAY_PHY_CONTROL,
1558
				PHY_COM_LANE_RESET_DEASSERT(phy, val));
1559
		}
1560
	}
4560 Serge 1561
}
1562
 
4104 Serge 1563
static void vlv_enable_pll(struct intel_crtc *crtc)
2327 Serge 1564
{
4104 Serge 1565
	struct drm_device *dev = crtc->base.dev;
1566
	struct drm_i915_private *dev_priv = dev->dev_private;
1567
	int reg = DPLL(crtc->pipe);
1568
	u32 dpll = crtc->config.dpll_hw_state.dpll;
2327 Serge 1569
 
4104 Serge 1570
	assert_pipe_disabled(dev_priv, crtc->pipe);
1571
 
2327 Serge 1572
    /* No really, not for ILK+ */
4104 Serge 1573
	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
2327 Serge 1574
 
1575
    /* PLL is protected by panel, make sure we can write it */
1576
    if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
4104 Serge 1577
		assert_panel_unlocked(dev_priv, crtc->pipe);
2327 Serge 1578
 
4104 Serge 1579
	I915_WRITE(reg, dpll);
1580
	POSTING_READ(reg);
1581
	udelay(150);
2327 Serge 1582
 
4104 Serge 1583
	if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1584
		DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
1585
 
1586
	I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
1587
	POSTING_READ(DPLL_MD(crtc->pipe));
1588
 
1589
	/* We do this three times for luck */
1590
	I915_WRITE(reg, dpll);
1591
	POSTING_READ(reg);
1592
	udelay(150); /* wait for warmup */
1593
	I915_WRITE(reg, dpll);
1594
	POSTING_READ(reg);
1595
	udelay(150); /* wait for warmup */
1596
	I915_WRITE(reg, dpll);
1597
	POSTING_READ(reg);
1598
	udelay(150); /* wait for warmup */
1599
}
1600
 
5060 serge 1601
static void chv_enable_pll(struct intel_crtc *crtc)
1602
{
1603
	struct drm_device *dev = crtc->base.dev;
1604
	struct drm_i915_private *dev_priv = dev->dev_private;
1605
	int pipe = crtc->pipe;
1606
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
1607
	u32 tmp;
1608
 
1609
	assert_pipe_disabled(dev_priv, crtc->pipe);
1610
 
1611
	BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));
1612
 
1613
	mutex_lock(&dev_priv->dpio_lock);
1614
 
1615
	/* Enable back the 10bit clock to display controller */
1616
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1617
	tmp |= DPIO_DCLKP_EN;
1618
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1619
 
1620
	/*
1621
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1622
	 */
1623
	udelay(1);
1624
 
1625
	/* Enable PLL */
1626
	I915_WRITE(DPLL(pipe), crtc->config.dpll_hw_state.dpll);
1627
 
1628
	/* Check PLL is locked */
1629
	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1630
		DRM_ERROR("PLL %d failed to lock\n", pipe);
1631
 
1632
	/* not sure when this should be written */
1633
	I915_WRITE(DPLL_MD(pipe), crtc->config.dpll_hw_state.dpll_md);
1634
	POSTING_READ(DPLL_MD(pipe));
1635
 
1636
	mutex_unlock(&dev_priv->dpio_lock);
1637
}
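
/*
 * Note on the CHV path above: unlike vlv_enable_pll(), the sideband
 * (DPIO) write that gates the 10-bit clock in CHV_CMN_DW14 has to be
 * serialized with dpio_lock, and the hardware wants more than 100ns
 * between setting DPIO_DCLKP_EN and writing the DPLL enable bit,
 * which the udelay(1) above comfortably over-satisfies.
 */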
1638
 
4104 Serge 1639
static void i9xx_enable_pll(struct intel_crtc *crtc)
1640
{
1641
	struct drm_device *dev = crtc->base.dev;
1642
	struct drm_i915_private *dev_priv = dev->dev_private;
1643
	int reg = DPLL(crtc->pipe);
1644
	u32 dpll = crtc->config.dpll_hw_state.dpll;
1645
 
1646
	assert_pipe_disabled(dev_priv, crtc->pipe);
1647
 
1648
	/* No really, not for ILK+ */
5060 serge 1649
	BUG_ON(INTEL_INFO(dev)->gen >= 5);
4104 Serge 1650
 
1651
	/* PLL is protected by panel, make sure we can write it */
1652
	if (IS_MOBILE(dev) && !IS_I830(dev))
1653
		assert_panel_unlocked(dev_priv, crtc->pipe);
1654
 
1655
	I915_WRITE(reg, dpll);
1656
 
1657
	/* Wait for the clocks to stabilize. */
1658
	POSTING_READ(reg);
1659
	udelay(150);
1660
 
1661
	if (INTEL_INFO(dev)->gen >= 4) {
1662
		I915_WRITE(DPLL_MD(crtc->pipe),
1663
			   crtc->config.dpll_hw_state.dpll_md);
1664
	} else {
1665
		/* The pixel multiplier can only be updated once the
1666
		 * DPLL is enabled and the clocks are stable.
1667
		 *
1668
		 * So write it again.
1669
		 */
1670
		I915_WRITE(reg, dpll);
1671
	}
1672
 
2327 Serge 1673
    /* We do this three times for luck */
4104 Serge 1674
	I915_WRITE(reg, dpll);
2327 Serge 1675
    POSTING_READ(reg);
1676
    udelay(150); /* wait for warmup */
4104 Serge 1677
	I915_WRITE(reg, dpll);
2327 Serge 1678
    POSTING_READ(reg);
1679
    udelay(150); /* wait for warmup */
4104 Serge 1680
	I915_WRITE(reg, dpll);
2327 Serge 1681
    POSTING_READ(reg);
1682
    udelay(150); /* wait for warmup */
1683
}
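
/*
 * Illustrative sketch only, not used by the driver: the triple
 * write / posting read / warmup delay sequence at the end of
 * i9xx_enable_pll() and vlv_enable_pll() could equally be written as
 * a small loop.  The helper name below is an assumption chosen for
 * illustration.
 */
static void i9xx_dpll_warmup_writes_sketch(struct drm_i915_private *dev_priv,
					   int reg, u32 dpll)
{
	int i;

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}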
1684
 
1685
/**
4104 Serge 1686
 * i9xx_disable_pll - disable a PLL
2327 Serge 1687
 * @dev_priv: i915 private structure
1688
 * @pipe: pipe PLL to disable
1689
 *
1690
 * Disable the PLL for @pipe, making sure the pipe is off first.
1691
 *
1692
 * Note!  This is for pre-ILK only.
1693
 */
4104 Serge 1694
static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
2327 Serge 1695
{
1696
	/* Don't disable pipe A or pipe A PLLs if needed */
1697
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1698
		return;
1699
 
1700
	/* Make sure the pipe isn't still relying on us */
1701
	assert_pipe_disabled(dev_priv, pipe);
1702
 
4104 Serge 1703
	I915_WRITE(DPLL(pipe), 0);
1704
	POSTING_READ(DPLL(pipe));
2327 Serge 1705
}
1706
 
4539 Serge 1707
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1708
{
1709
	u32 val = 0;
1710
 
1711
	/* Make sure the pipe isn't still relying on us */
1712
	assert_pipe_disabled(dev_priv, pipe);
1713
 
4560 Serge 1714
	/*
1715
	 * Leave integrated clock source and reference clock enabled for pipe B.
1716
	 * The latter is needed for VGA hotplug / manual detection.
1717
	 */
4539 Serge 1718
	if (pipe == PIPE_B)
4560 Serge 1719
		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
4539 Serge 1720
	I915_WRITE(DPLL(pipe), val);
1721
	POSTING_READ(DPLL(pipe));
5060 serge 1722
 
4539 Serge 1723
}
1724
 
5060 serge 1725
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1726
{
1727
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
1728
	u32 val;
1729
 
1730
	/* Make sure the pipe isn't still relying on us */
1731
	assert_pipe_disabled(dev_priv, pipe);
1732
 
1733
	/* Set PLL en = 0 */
1734
	val = DPLL_SSC_REF_CLOCK_CHV;
1735
	if (pipe != PIPE_A)
1736
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1737
	I915_WRITE(DPLL(pipe), val);
1738
	POSTING_READ(DPLL(pipe));
1739
 
1740
	mutex_lock(&dev_priv->dpio_lock);
1741
 
1742
	/* Disable 10bit clock to display controller */
1743
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1744
	val &= ~DPIO_DCLKP_EN;
1745
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1746
 
1747
	/* disable left/right clock distribution */
1748
	if (pipe != PIPE_B) {
1749
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
1750
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
1751
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
1752
	} else {
1753
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
1754
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
1755
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
1756
	}
1757
 
1758
	mutex_unlock(&dev_priv->dpio_lock);
1759
}
1760
 
4560 Serge 1761
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1762
		struct intel_digital_port *dport)
3031 serge 1763
{
4104 Serge 1764
	u32 port_mask;
5060 serge 1765
	int dpll_reg;
3031 serge 1766
 
4560 Serge 1767
	switch (dport->port) {
1768
	case PORT_B:
4104 Serge 1769
		port_mask = DPLL_PORTB_READY_MASK;
5060 serge 1770
		dpll_reg = DPLL(0);
4560 Serge 1771
		break;
1772
	case PORT_C:
4104 Serge 1773
		port_mask = DPLL_PORTC_READY_MASK;
5060 serge 1774
		dpll_reg = DPLL(0);
4560 Serge 1775
		break;
5060 serge 1776
	case PORT_D:
1777
		port_mask = DPLL_PORTD_READY_MASK;
1778
		dpll_reg = DPIO_PHY_STATUS;
1779
		break;
4560 Serge 1780
	default:
1781
		BUG();
1782
	}
3243 Serge 1783
 
5060 serge 1784
	if (wait_for((I915_READ(dpll_reg) & port_mask) == 0, 1000))
4104 Serge 1785
		WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
5060 serge 1786
		     port_name(dport->port), I915_READ(dpll_reg));
3031 serge 1787
}
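
/*
 * Illustrative sketch only: the wait_for() used above amounts to a
 * bounded poll of the status register until the ready bits clear.
 * Open-coded (the helper name and the 10us poll interval are
 * assumptions for illustration), it would look roughly like this:
 */
static int vlv_poll_port_ready_sketch(struct drm_i915_private *dev_priv,
				      int dpll_reg, u32 port_mask,
				      unsigned int timeout_ms)
{
	unsigned int waited_us = 0;

	while (I915_READ(dpll_reg) & port_mask) {
		if (waited_us >= timeout_ms * 1000)
			return -ETIMEDOUT;
		udelay(10);
		waited_us += 10;
	}

	return 0;
}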
1788
 
5060 serge 1789
static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
1790
{
1791
	struct drm_device *dev = crtc->base.dev;
1792
	struct drm_i915_private *dev_priv = dev->dev_private;
1793
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1794
 
1795
	if (WARN_ON(pll == NULL))
1796
		return;
1797
 
1798
	WARN_ON(!pll->refcount);
1799
	if (pll->active == 0) {
1800
		DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
1801
		WARN_ON(pll->on);
1802
		assert_shared_dpll_disabled(dev_priv, pll);
1803
 
1804
		pll->mode_set(dev_priv, pll);
1805
	}
1806
}
1807
 
2327 Serge 1808
/**
5060 serge 1809
 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
2327 Serge 1810
 * @crtc: CRTC whose shared DPLL should be enabled
1812
 *
1813
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
1814
 * drives the transcoder clock.
1815
 */
5060 serge 1816
static void intel_enable_shared_dpll(struct intel_crtc *crtc)
2327 Serge 1817
{
5060 serge 1818
	struct drm_device *dev = crtc->base.dev;
1819
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 1820
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
2327 Serge 1821
 
4104 Serge 1822
	if (WARN_ON(pll == NULL))
2342 Serge 1823
		return;
1824
 
3031 serge 1825
	if (WARN_ON(pll->refcount == 0))
1826
		return;
2327 Serge 1827
 
4104 Serge 1828
	DRM_DEBUG_KMS("enable %s (active %d, on? %d)for crtc %d\n",
1829
		      pll->name, pll->active, pll->on,
1830
		      crtc->base.base.id);
3031 serge 1831
 
4104 Serge 1832
	if (pll->active++) {
1833
		WARN_ON(!pll->on);
1834
		assert_shared_dpll_enabled(dev_priv, pll);
3031 serge 1835
		return;
1836
	}
4104 Serge 1837
	WARN_ON(pll->on);
3031 serge 1838
 
5060 serge 1839
	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
1840
 
4104 Serge 1841
	DRM_DEBUG_KMS("enabling %s\n", pll->name);
1842
	pll->enable(dev_priv, pll);
3031 serge 1843
	pll->on = true;
2327 Serge 1844
}
1845
 
5060 serge 1846
void intel_disable_shared_dpll(struct intel_crtc *crtc)
2327 Serge 1847
{
5060 serge 1848
	struct drm_device *dev = crtc->base.dev;
1849
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 1850
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
2327 Serge 1851
 
1852
	/* PCH only available on ILK+ */
5060 serge 1853
	BUG_ON(INTEL_INFO(dev)->gen < 5);
4104 Serge 1854
	if (WARN_ON(pll == NULL))
3031 serge 1855
		return;
2327 Serge 1856
 
3031 serge 1857
	if (WARN_ON(pll->refcount == 0))
1858
		return;
2327 Serge 1859
 
4104 Serge 1860
	DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
1861
		      pll->name, pll->active, pll->on,
1862
		      crtc->base.base.id);
2342 Serge 1863
 
3031 serge 1864
	if (WARN_ON(pll->active == 0)) {
4104 Serge 1865
		assert_shared_dpll_disabled(dev_priv, pll);
3031 serge 1866
		return;
1867
	}
2342 Serge 1868
 
4104 Serge 1869
	assert_shared_dpll_enabled(dev_priv, pll);
1870
	WARN_ON(!pll->on);
1871
	if (--pll->active)
2342 Serge 1872
		return;
1873
 
4104 Serge 1874
	DRM_DEBUG_KMS("disabling %s\n", pll->name);
1875
	pll->disable(dev_priv, pll);
3031 serge 1876
	pll->on = false;
5060 serge 1877
 
1878
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
2327 Serge 1879
}
1880
 
3243 Serge 1881
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
2327 Serge 1882
				    enum pipe pipe)
1883
{
3243 Serge 1884
	struct drm_device *dev = dev_priv->dev;
3031 serge 1885
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
4104 Serge 1886
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3243 Serge 1887
	uint32_t reg, val, pipeconf_val;
2327 Serge 1888
 
1889
	/* PCH only available on ILK+ */
5060 serge 1890
	BUG_ON(INTEL_INFO(dev)->gen < 5);
2327 Serge 1891
 
1892
	/* Make sure PCH DPLL is enabled */
4104 Serge 1893
	assert_shared_dpll_enabled(dev_priv,
1894
				   intel_crtc_to_shared_dpll(intel_crtc));
2327 Serge 1895
 
1896
	/* FDI must be feeding us bits for PCH ports */
1897
	assert_fdi_tx_enabled(dev_priv, pipe);
1898
	assert_fdi_rx_enabled(dev_priv, pipe);
1899
 
3243 Serge 1900
	if (HAS_PCH_CPT(dev)) {
1901
		/* Workaround: Set the timing override bit before enabling the
1902
		 * pch transcoder. */
1903
		reg = TRANS_CHICKEN2(pipe);
1904
		val = I915_READ(reg);
1905
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1906
		I915_WRITE(reg, val);
3031 serge 1907
	}
3243 Serge 1908
 
4104 Serge 1909
	reg = PCH_TRANSCONF(pipe);
2327 Serge 1910
	val = I915_READ(reg);
3031 serge 1911
	pipeconf_val = I915_READ(PIPECONF(pipe));
2327 Serge 1912
 
1913
	if (HAS_PCH_IBX(dev_priv->dev)) {
1914
		/*
1915
		 * make the BPC in transcoder be consistent with
1916
		 * that in pipeconf reg.
1917
		 */
3480 Serge 1918
		val &= ~PIPECONF_BPC_MASK;
1919
		val |= pipeconf_val & PIPECONF_BPC_MASK;
2327 Serge 1920
	}
3031 serge 1921
 
1922
	val &= ~TRANS_INTERLACE_MASK;
1923
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1924
		if (HAS_PCH_IBX(dev_priv->dev) &&
1925
		    intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
1926
			val |= TRANS_LEGACY_INTERLACED_ILK;
1927
		else
1928
			val |= TRANS_INTERLACED;
1929
	else
1930
		val |= TRANS_PROGRESSIVE;
1931
 
2327 Serge 1932
	I915_WRITE(reg, val | TRANS_ENABLE);
1933
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
4104 Serge 1934
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
2327 Serge 1935
}
1936
 
3243 Serge 1937
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1938
				      enum transcoder cpu_transcoder)
1939
{
1940
	u32 val, pipeconf_val;
1941
 
1942
	/* PCH only available on ILK+ */
5060 serge 1943
	BUG_ON(INTEL_INFO(dev_priv->dev)->gen < 5);
3243 Serge 1944
 
1945
	/* FDI must be feeding us bits for PCH ports */
3480 Serge 1946
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
3243 Serge 1947
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
1948
 
1949
	/* Workaround: set timing override bit. */
1950
	val = I915_READ(_TRANSA_CHICKEN2);
1951
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1952
	I915_WRITE(_TRANSA_CHICKEN2, val);
1953
 
1954
	val = TRANS_ENABLE;
1955
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1956
 
1957
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1958
	    PIPECONF_INTERLACED_ILK)
1959
		val |= TRANS_INTERLACED;
1960
	else
1961
		val |= TRANS_PROGRESSIVE;
1962
 
4104 Serge 1963
	I915_WRITE(LPT_TRANSCONF, val);
1964
	if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
3243 Serge 1965
		DRM_ERROR("Failed to enable PCH transcoder\n");
1966
}
1967
 
1968
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
2327 Serge 1969
				     enum pipe pipe)
1970
{
3243 Serge 1971
	struct drm_device *dev = dev_priv->dev;
1972
	uint32_t reg, val;
2327 Serge 1973
 
1974
	/* FDI relies on the transcoder */
1975
	assert_fdi_tx_disabled(dev_priv, pipe);
1976
	assert_fdi_rx_disabled(dev_priv, pipe);
1977
 
1978
	/* Ports must be off as well */
1979
	assert_pch_ports_disabled(dev_priv, pipe);
1980
 
4104 Serge 1981
	reg = PCH_TRANSCONF(pipe);
2327 Serge 1982
	val = I915_READ(reg);
1983
	val &= ~TRANS_ENABLE;
1984
	I915_WRITE(reg, val);
1985
	/* wait for PCH transcoder off, transcoder state */
1986
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
4104 Serge 1987
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
3243 Serge 1988
 
1989
	if (!HAS_PCH_IBX(dev)) {
1990
		/* Workaround: Clear the timing override chicken bit again. */
1991
		reg = TRANS_CHICKEN2(pipe);
1992
		val = I915_READ(reg);
1993
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1994
		I915_WRITE(reg, val);
1995
	}
2327 Serge 1996
}
1997
 
3243 Serge 1998
static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1999
{
2000
	u32 val;
2001
 
4104 Serge 2002
	val = I915_READ(LPT_TRANSCONF);
3243 Serge 2003
	val &= ~TRANS_ENABLE;
4104 Serge 2004
	I915_WRITE(LPT_TRANSCONF, val);
3243 Serge 2005
	/* wait for PCH transcoder off, transcoder state */
4104 Serge 2006
	if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
3243 Serge 2007
		DRM_ERROR("Failed to disable PCH transcoder\n");
2008
 
2009
	/* Workaround: clear timing override bit. */
2010
	val = I915_READ(_TRANSA_CHICKEN2);
2011
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
2012
	I915_WRITE(_TRANSA_CHICKEN2, val);
2013
}
2014
 
2327 Serge 2015
/**
2016
 * intel_enable_pipe - enable a pipe, asserting requirements
5060 serge 2017
 * @crtc: crtc responsible for the pipe
2327 Serge 2018
 *
5060 serge 2019
 * Enable @crtc's pipe, making sure that various hardware specific requirements
2327 Serge 2020
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
2021
 */
5060 serge 2022
static void intel_enable_pipe(struct intel_crtc *crtc)
2327 Serge 2023
{
5060 serge 2024
	struct drm_device *dev = crtc->base.dev;
2025
	struct drm_i915_private *dev_priv = dev->dev_private;
2026
	enum pipe pipe = crtc->pipe;
3243 Serge 2027
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
2028
								      pipe);
3480 Serge 2029
	enum pipe pch_transcoder;
2327 Serge 2030
	int reg;
2031
	u32 val;
2032
 
4104 Serge 2033
	assert_planes_disabled(dev_priv, pipe);
4560 Serge 2034
	assert_cursor_disabled(dev_priv, pipe);
4104 Serge 2035
	assert_sprites_disabled(dev_priv, pipe);
2036
 
3480 Serge 2037
	if (HAS_PCH_LPT(dev_priv->dev))
3243 Serge 2038
		pch_transcoder = TRANSCODER_A;
2039
	else
2040
		pch_transcoder = pipe;
2041
 
2327 Serge 2042
	/*
2043
	 * A pipe without a PLL won't actually be able to drive bits from
2044
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
2045
	 * need the check.
2046
	 */
2047
	if (!HAS_PCH_SPLIT(dev_priv->dev))
5060 serge 2048
		if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DSI))
4560 Serge 2049
			assert_dsi_pll_enabled(dev_priv);
2050
		else
2327 Serge 2051
			assert_pll_enabled(dev_priv, pipe);
2052
	else {
5060 serge 2053
		if (crtc->config.has_pch_encoder) {
2327 Serge 2054
			/* if driving the PCH, we need FDI enabled */
3243 Serge 2055
			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
3480 Serge 2056
			assert_fdi_tx_pll_enabled(dev_priv,
2057
						  (enum pipe) cpu_transcoder);
2327 Serge 2058
		}
2059
		/* FIXME: assert CPU port conditions for SNB+ */
2060
	}
2061
 
3243 Serge 2062
	reg = PIPECONF(cpu_transcoder);
2327 Serge 2063
	val = I915_READ(reg);
5060 serge 2064
	if (val & PIPECONF_ENABLE) {
2065
		WARN_ON(!(pipe == PIPE_A &&
2066
			  dev_priv->quirks & QUIRK_PIPEA_FORCE));
2327 Serge 2067
		return;
5060 serge 2068
	}
2327 Serge 2069
 
2070
	I915_WRITE(reg, val | PIPECONF_ENABLE);
5060 serge 2071
	POSTING_READ(reg);
2327 Serge 2072
}
2073
 
2074
/**
2075
 * intel_disable_pipe - disable a pipe, asserting requirements
2076
 * @dev_priv: i915 private structure
2077
 * @pipe: pipe to disable
2078
 *
2079
 * Disable @pipe, making sure that various hardware specific requirements
2080
 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
2081
 *
2082
 * @pipe should be %PIPE_A or %PIPE_B.
2083
 *
2084
 * Will wait until the pipe has shut down before returning.
2085
 */
2086
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
2087
			       enum pipe pipe)
2088
{
3243 Serge 2089
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
2090
								      pipe);
2327 Serge 2091
	int reg;
2092
	u32 val;
2093
 
3031 serge 2094
    /*
2327 Serge 2095
	 * Make sure planes won't keep trying to pump pixels to us,
2096
	 * or we might hang the display.
2097
	 */
2098
	assert_planes_disabled(dev_priv, pipe);
4560 Serge 2099
	assert_cursor_disabled(dev_priv, pipe);
3746 Serge 2100
	assert_sprites_disabled(dev_priv, pipe);
2327 Serge 2101
 
2102
	/* Don't disable pipe A or pipe A PLLs if needed */
2103
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
2104
		return;
2105
 
3243 Serge 2106
	reg = PIPECONF(cpu_transcoder);
2327 Serge 2107
	val = I915_READ(reg);
2108
	if ((val & PIPECONF_ENABLE) == 0)
2109
		return;
2110
 
2111
	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
2112
	intel_wait_for_pipe_off(dev_priv->dev, pipe);
2113
}
2114
 
2115
/*
2116
 * Plane regs are double buffered, going from enabled->disabled needs a
2117
 * trigger in order to latch.  The display address reg provides this.
2118
 */
4560 Serge 2119
void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
2327 Serge 2120
				      enum plane plane)
2121
{
5060 serge 2122
	struct drm_device *dev = dev_priv->dev;
2123
	u32 reg = INTEL_INFO(dev)->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
4560 Serge 2124
 
2125
	I915_WRITE(reg, I915_READ(reg));
2126
	POSTING_READ(reg);
2327 Serge 2127
}
2128
 
2129
/**
5060 serge 2130
 * intel_enable_primary_hw_plane - enable the primary plane on a given pipe
2327 Serge 2131
 * @dev_priv: i915 private structure
2132
 * @plane: plane to enable
2133
 * @pipe: pipe being fed
2134
 *
2135
 * Enable @plane on @pipe, making sure that @pipe is running first.
2136
 */
5060 serge 2137
static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
2327 Serge 2138
			       enum plane plane, enum pipe pipe)
2139
{
5060 serge 2140
	struct drm_device *dev = dev_priv->dev;
4560 Serge 2141
	struct intel_crtc *intel_crtc =
2142
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
2327 Serge 2143
	int reg;
2144
	u32 val;
2145
 
2146
	/* If the pipe isn't enabled, we can't pump pixels and may hang */
2147
	assert_pipe_enabled(dev_priv, pipe);
2148
 
5060 serge 2149
	if (intel_crtc->primary_enabled)
2150
		return;
4560 Serge 2151
 
2152
	intel_crtc->primary_enabled = true;
2153
 
2327 Serge 2154
	reg = DSPCNTR(plane);
2155
	val = I915_READ(reg);
5060 serge 2156
	WARN_ON(val & DISPLAY_PLANE_ENABLE);
2327 Serge 2157
 
2158
	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
4560 Serge 2159
	intel_flush_primary_plane(dev_priv, plane);
2327 Serge 2160
}
2161
 
2162
/**
5060 serge 2163
 * intel_disable_primary_hw_plane - disable the primary hardware plane
2327 Serge 2164
 * @dev_priv: i915 private structure
2165
 * @plane: plane to disable
2166
 * @pipe: pipe consuming the data
2167
 *
2168
 * Disable @plane; should be an independent operation.
2169
 */
5060 serge 2170
static void intel_disable_primary_hw_plane(struct drm_i915_private *dev_priv,
2327 Serge 2171
				enum plane plane, enum pipe pipe)
2172
{
4560 Serge 2173
	struct intel_crtc *intel_crtc =
2174
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
2327 Serge 2175
	int reg;
2176
	u32 val;
2177
 
5060 serge 2178
	if (!intel_crtc->primary_enabled)
2179
		return;
4560 Serge 2180
 
2181
	intel_crtc->primary_enabled = false;
2182
 
2327 Serge 2183
	reg = DSPCNTR(plane);
2184
	val = I915_READ(reg);
5060 serge 2185
	WARN_ON((val & DISPLAY_PLANE_ENABLE) == 0);
2327 Serge 2186
 
2187
	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
4560 Serge 2188
	intel_flush_primary_plane(dev_priv, plane);
2327 Serge 2189
}
2190
 
3746 Serge 2191
static bool need_vtd_wa(struct drm_device *dev)
2192
{
2193
#ifdef CONFIG_INTEL_IOMMU
2194
	if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2195
		return true;
2196
#endif
2197
	return false;
2198
}
2199
 
5060 serge 2200
static int intel_align_height(struct drm_device *dev, int height, bool tiled)
2201
{
2202
	int tile_height;
2203
 
2204
	tile_height = tiled ? (IS_GEN2(dev) ? 16 : 8) : 1;
2205
	return ALIGN(height, tile_height);
2206
}
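
/*
 * Worked example for intel_align_height(): X-tiled surfaces use 8-line
 * tiles on gen3+ and 16-line tiles on gen2, so a 1080-line tiled
 * framebuffer stays at 1080 on gen3+ (already a multiple of 8) but is
 * padded to 1088 on gen2; linear surfaces are returned unchanged.
 */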
2207
 
2335 Serge 2208
int
2209
intel_pin_and_fence_fb_obj(struct drm_device *dev,
2210
			   struct drm_i915_gem_object *obj,
5060 serge 2211
			   struct intel_engine_cs *pipelined)
2335 Serge 2212
{
2213
	struct drm_i915_private *dev_priv = dev->dev_private;
2214
	u32 alignment;
2215
	int ret;
2327 Serge 2216
 
5060 serge 2217
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2218
 
2335 Serge 2219
	switch (obj->tiling_mode) {
2220
	case I915_TILING_NONE:
2221
		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
2222
			alignment = 128 * 1024;
2223
		else if (INTEL_INFO(dev)->gen >= 4)
2224
			alignment = 4 * 1024;
2225
		else
2226
			alignment = 64 * 1024;
2227
		break;
2228
	case I915_TILING_X:
2229
		/* pin() will align the object as required by fence */
2230
		alignment = 0;
2231
		break;
2232
	case I915_TILING_Y:
4560 Serge 2233
		WARN(1, "Y tiled bo slipped through, driver bug!\n");
2335 Serge 2234
		return -EINVAL;
2235
	default:
2236
		BUG();
2237
	}
2327 Serge 2238
 
3746 Serge 2239
	/* Note that the w/a also requires 64 PTE of padding following the
2240
	 * bo. We currently fill all unused PTE with the shadow page and so
2241
	 * we should always have valid PTE following the scanout preventing
2242
	 * the VT-d warning.
2243
	 */
2244
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
2245
		alignment = 256 * 1024;
2246
 
5097 serge 2247
	/*
2248
	 * Global gtt pte registers are special registers which actually forward
2249
	 * writes to a chunk of system memory. Which means that there is no risk
2250
	 * that the register values disappear as soon as we call
2251
	 * intel_runtime_pm_put(), so it is correct to wrap only the
2252
	 * pin/unpin/fence and not more.
2253
	 */
2254
	intel_runtime_pm_get(dev_priv);
2255
 
2335 Serge 2256
	dev_priv->mm.interruptible = false;
2257
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
2258
	if (ret)
2259
		goto err_interruptible;
2327 Serge 2260
 
2335 Serge 2261
	/* Install a fence for tiled scan-out. Pre-i965 always needs a
2262
	 * fence, whereas 965+ only requires a fence if using
2263
	 * framebuffer compression.  For simplicity, we always install
2264
	 * a fence as the cost is not that onerous.
2265
	 */
3480 Serge 2266
	ret = i915_gem_object_get_fence(obj);
2267
	if (ret)
2268
		goto err_unpin;
2327 Serge 2269
 
3480 Serge 2270
	i915_gem_object_pin_fence(obj);
2271
 
2335 Serge 2272
	dev_priv->mm.interruptible = true;
5097 serge 2273
	intel_runtime_pm_put(dev_priv);
2335 Serge 2274
	return 0;
2327 Serge 2275
 
2335 Serge 2276
err_unpin:
4104 Serge 2277
	i915_gem_object_unpin_from_display_plane(obj);
2335 Serge 2278
err_interruptible:
2279
	dev_priv->mm.interruptible = true;
5097 serge 2280
	intel_runtime_pm_put(dev_priv);
2335 Serge 2281
	return ret;
2282
}
2327 Serge 2283
 
3031 serge 2284
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
2285
{
5060 serge 2286
	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
2287
 
2288
	i915_gem_object_unpin_fence(obj);
2289
//	i915_gem_object_unpin_from_display_plane(obj);
3031 serge 2290
}
2291
 
2292
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
2293
 * is assumed to be a power-of-two. */
3480 Serge 2294
unsigned long intel_gen4_compute_page_offset(int *x, int *y,
2295
					     unsigned int tiling_mode,
2296
					     unsigned int cpp,
3031 serge 2297
							unsigned int pitch)
2298
{
3480 Serge 2299
	if (tiling_mode != I915_TILING_NONE) {
2300
		unsigned int tile_rows, tiles;
3031 serge 2301
 
2302
		tile_rows = *y / 8;
2303
		*y %= 8;
2304
 
3480 Serge 2305
		tiles = *x / (512/cpp);
2306
		*x %= 512/cpp;
2307
 
3031 serge 2308
		return tile_rows * pitch * 8 + tiles * 4096;
3480 Serge 2309
	} else {
2310
		unsigned int offset;
2311
 
2312
		offset = *y * pitch + *x * cpp;
2313
		*y = 0;
2314
		*x = (offset & 4095) / cpp;
2315
		return offset & -4096;
2316
	}
3031 serge 2317
}
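
/*
 * Worked example for intel_gen4_compute_page_offset(): with an X-tiled
 * surface, cpp = 4 and pitch = 8192, the pixel (x, y) = (300, 21) sits
 * in tile row 21 / 8 = 2 and tile column 300 / 128 = 2, so the function
 * returns 2 * 8192 * 8 + 2 * 4096 = 139264 and adjusts (x, y) to
 * (44, 5).  In the linear case with pitch = 4096, (x, y) = (10, 3)
 * gives a byte offset of 12328, which is rounded down to the 4096-byte
 * boundary 12288 with the remainder folded back into x.
 */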
2318
 
5060 serge 2319
int intel_format_to_fourcc(int format)
2327 Serge 2320
{
5060 serge 2321
	switch (format) {
2322
	case DISPPLANE_8BPP:
2323
		return DRM_FORMAT_C8;
2324
	case DISPPLANE_BGRX555:
2325
		return DRM_FORMAT_XRGB1555;
2326
	case DISPPLANE_BGRX565:
2327
		return DRM_FORMAT_RGB565;
2328
	default:
2329
	case DISPPLANE_BGRX888:
2330
		return DRM_FORMAT_XRGB8888;
2331
	case DISPPLANE_RGBX888:
2332
		return DRM_FORMAT_XBGR8888;
2333
	case DISPPLANE_BGRX101010:
2334
		return DRM_FORMAT_XRGB2101010;
2335
	case DISPPLANE_RGBX101010:
2336
		return DRM_FORMAT_XBGR2101010;
2337
	}
2338
}
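
/*
 * Note that the default: label above deliberately falls into the
 * DISPPLANE_BGRX888 case, so any plane-control value the switch does
 * not recognise is reported as DRM_FORMAT_XRGB8888 rather than
 * failing; e.g. DISPPLANE_BGRX565 maps to DRM_FORMAT_RGB565.
 */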
2339
 
2340
static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
2341
				  struct intel_plane_config *plane_config)
2342
{
2343
	struct drm_device *dev = crtc->base.dev;
2344
	struct drm_i915_gem_object *obj = NULL;
2345
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2346
	u32 base = plane_config->base;
2347
 
2348
	if (plane_config->size == 0)
2349
		return false;
2350
 
2351
	obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
2352
							     plane_config->size);
2353
	if (!obj)
2354
		return false;
2355
 
2356
    main_fb_obj = obj;
2357
 
2358
	if (plane_config->tiled) {
2359
		obj->tiling_mode = I915_TILING_X;
2360
		obj->stride = crtc->base.primary->fb->pitches[0];
2361
	}
2362
 
2363
	mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format;
2364
	mode_cmd.width = crtc->base.primary->fb->width;
2365
	mode_cmd.height = crtc->base.primary->fb->height;
2366
	mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0];
2367
 
2368
	mutex_lock(&dev->struct_mutex);
2369
 
2370
	if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb),
2371
				   &mode_cmd, obj)) {
2372
		DRM_DEBUG_KMS("intel fb init failed\n");
2373
		goto out_unref_obj;
2374
	}
2375
 
2376
	obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe);
2377
	mutex_unlock(&dev->struct_mutex);
2378
 
2379
	DRM_DEBUG_KMS("plane fb obj %p\n", obj);
2380
	return true;
2381
 
2382
out_unref_obj:
2383
	drm_gem_object_unreference(&obj->base);
2384
	mutex_unlock(&dev->struct_mutex);
2385
	return false;
2386
}
2387
 
2388
static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
2389
				 struct intel_plane_config *plane_config)
2390
{
2391
	struct drm_device *dev = intel_crtc->base.dev;
2392
	struct drm_crtc *c;
2393
	struct intel_crtc *i;
2394
	struct drm_i915_gem_object *obj;
2395
 
2396
	if (!intel_crtc->base.primary->fb)
2397
		return;
2398
 
2399
	if (intel_alloc_plane_obj(intel_crtc, plane_config))
2400
		return;
2401
 
2402
	kfree(intel_crtc->base.primary->fb);
2403
	intel_crtc->base.primary->fb = NULL;
2404
 
2405
	/*
2406
	 * Failed to alloc the obj, check to see if we should share
2407
	 * an fb with another CRTC instead
2408
	 */
2409
	for_each_crtc(dev, c) {
2410
		i = to_intel_crtc(c);
2411
 
2412
		if (c == &intel_crtc->base)
2413
			continue;
2414
 
2415
		if (!i->active)
2416
			continue;
2417
 
2418
		obj = intel_fb_obj(c->primary->fb);
2419
		if (obj == NULL)
2420
			continue;
2421
 
2422
		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
2423
			drm_framebuffer_reference(c->primary->fb);
2424
			intel_crtc->base.primary->fb = c->primary->fb;
2425
			obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
2426
			break;
2427
		}
2428
	}
2429
}
2430
 
2431
static void i9xx_update_primary_plane(struct drm_crtc *crtc,
2432
				     struct drm_framebuffer *fb,
2433
				     int x, int y)
2434
{
2327 Serge 2435
    struct drm_device *dev = crtc->dev;
2436
    struct drm_i915_private *dev_priv = dev->dev_private;
2437
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5060 serge 2438
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2327 Serge 2439
    int plane = intel_crtc->plane;
3031 serge 2440
	unsigned long linear_offset;
2327 Serge 2441
    u32 dspcntr;
2442
    u32 reg;
2443
 
2444
    reg = DSPCNTR(plane);
2445
    dspcntr = I915_READ(reg);
2446
    /* Mask out pixel format bits in case we change it */
2447
    dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
3243 Serge 2448
	switch (fb->pixel_format) {
2449
	case DRM_FORMAT_C8:
2327 Serge 2450
        dspcntr |= DISPPLANE_8BPP;
2451
        break;
3243 Serge 2452
	case DRM_FORMAT_XRGB1555:
2453
	case DRM_FORMAT_ARGB1555:
2454
		dspcntr |= DISPPLANE_BGRX555;
2455
		break;
2456
	case DRM_FORMAT_RGB565:
2457
		dspcntr |= DISPPLANE_BGRX565;
2458
		break;
2459
	case DRM_FORMAT_XRGB8888:
2460
	case DRM_FORMAT_ARGB8888:
2461
		dspcntr |= DISPPLANE_BGRX888;
2462
		break;
2463
	case DRM_FORMAT_XBGR8888:
2464
	case DRM_FORMAT_ABGR8888:
2465
		dspcntr |= DISPPLANE_RGBX888;
2466
		break;
2467
	case DRM_FORMAT_XRGB2101010:
2468
	case DRM_FORMAT_ARGB2101010:
2469
		dspcntr |= DISPPLANE_BGRX101010;
2327 Serge 2470
        break;
3243 Serge 2471
	case DRM_FORMAT_XBGR2101010:
2472
	case DRM_FORMAT_ABGR2101010:
2473
		dspcntr |= DISPPLANE_RGBX101010;
2327 Serge 2474
        break;
2475
    default:
3746 Serge 2476
		BUG();
2327 Serge 2477
    }
3243 Serge 2478
 
2327 Serge 2479
    if (INTEL_INFO(dev)->gen >= 4) {
2480
        if (obj->tiling_mode != I915_TILING_NONE)
2481
            dspcntr |= DISPPLANE_TILED;
2482
        else
2483
            dspcntr &= ~DISPPLANE_TILED;
2484
    }
2485
 
4104 Serge 2486
	if (IS_G4X(dev))
2487
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2488
 
2327 Serge 2489
    I915_WRITE(reg, dspcntr);
2490
 
3031 serge 2491
	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2327 Serge 2492
 
3031 serge 2493
	if (INTEL_INFO(dev)->gen >= 4) {
2494
		intel_crtc->dspaddr_offset =
3480 Serge 2495
			intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
3031 serge 2496
							   fb->bits_per_pixel / 8,
2497
							   fb->pitches[0]);
2498
		linear_offset -= intel_crtc->dspaddr_offset;
2499
	} else {
2500
		intel_crtc->dspaddr_offset = linear_offset;
2501
	}
2502
 
4104 Serge 2503
	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2504
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2505
		      fb->pitches[0]);
2342 Serge 2506
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2327 Serge 2507
    if (INTEL_INFO(dev)->gen >= 4) {
4560 Serge 2508
		I915_WRITE(DSPSURF(plane),
4104 Serge 2509
				     i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2327 Serge 2510
        I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
3031 serge 2511
		I915_WRITE(DSPLINOFF(plane), linear_offset);
2327 Serge 2512
    } else
4104 Serge 2513
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
2327 Serge 2514
    POSTING_READ(reg);
2515
}
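
/*
 * On gen4+ the primary plane base is programmed through DSPSURF with
 * the tile-aligned dspaddr_offset split out into DSPTILEOFF/DSPLINOFF,
 * whereas older parts only have DSPADDR and take the full linear
 * offset there; that is why the two branches at the end of
 * i9xx_update_primary_plane() differ.
 */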
2516
 
5060 serge 2517
static void ironlake_update_primary_plane(struct drm_crtc *crtc,
2518
					 struct drm_framebuffer *fb,
2519
					 int x, int y)
2327 Serge 2520
{
2521
    struct drm_device *dev = crtc->dev;
2522
    struct drm_i915_private *dev_priv = dev->dev_private;
2523
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5060 serge 2524
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2327 Serge 2525
    int plane = intel_crtc->plane;
3031 serge 2526
	unsigned long linear_offset;
2327 Serge 2527
    u32 dspcntr;
2528
    u32 reg;
2529
 
2530
    reg = DSPCNTR(plane);
2531
    dspcntr = I915_READ(reg);
2532
    /* Mask out pixel format bits in case we change it */
2533
    dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
3243 Serge 2534
	switch (fb->pixel_format) {
2535
	case DRM_FORMAT_C8:
2327 Serge 2536
        dspcntr |= DISPPLANE_8BPP;
2537
        break;
3243 Serge 2538
	case DRM_FORMAT_RGB565:
2539
		dspcntr |= DISPPLANE_BGRX565;
2327 Serge 2540
        break;
3243 Serge 2541
	case DRM_FORMAT_XRGB8888:
2542
	case DRM_FORMAT_ARGB8888:
2543
		dspcntr |= DISPPLANE_BGRX888;
2544
		break;
2545
	case DRM_FORMAT_XBGR8888:
2546
	case DRM_FORMAT_ABGR8888:
2547
		dspcntr |= DISPPLANE_RGBX888;
2548
		break;
2549
	case DRM_FORMAT_XRGB2101010:
2550
	case DRM_FORMAT_ARGB2101010:
2551
		dspcntr |= DISPPLANE_BGRX101010;
2552
		break;
2553
	case DRM_FORMAT_XBGR2101010:
2554
	case DRM_FORMAT_ABGR2101010:
2555
		dspcntr |= DISPPLANE_RGBX101010;
2327 Serge 2556
        break;
2557
    default:
3746 Serge 2558
		BUG();
2327 Serge 2559
    }
2560
 
3480 Serge 2561
	if (obj->tiling_mode != I915_TILING_NONE)
2562
		dspcntr |= DISPPLANE_TILED;
2563
	else
2327 Serge 2564
        dspcntr &= ~DISPPLANE_TILED;
2565
 
4560 Serge 2566
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4104 Serge 2567
		dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
2568
	else
2327 Serge 2569
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2570
 
2571
    I915_WRITE(reg, dspcntr);
2572
 
3031 serge 2573
	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2574
	intel_crtc->dspaddr_offset =
3480 Serge 2575
		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
3031 serge 2576
						   fb->bits_per_pixel / 8,
2577
						   fb->pitches[0]);
2578
	linear_offset -= intel_crtc->dspaddr_offset;
2327 Serge 2579
 
4104 Serge 2580
	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2581
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2582
		      fb->pitches[0]);
2342 Serge 2583
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
4560 Serge 2584
	I915_WRITE(DSPSURF(plane),
4104 Serge 2585
			     i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
4560 Serge 2586
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3243 Serge 2587
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2588
	} else {
2330 Serge 2589
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
3031 serge 2590
		I915_WRITE(DSPLINOFF(plane), linear_offset);
3243 Serge 2591
	}
2330 Serge 2592
	POSTING_READ(reg);
2327 Serge 2593
}
2594
 
2595
/* Assume fb object is pinned & idle & fenced and just update base pointers */
2596
static int
2597
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2598
			   int x, int y, enum mode_set_atomic state)
2599
{
2600
	struct drm_device *dev = crtc->dev;
2601
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 2602
 
2603
	if (dev_priv->display.disable_fbc)
2604
		dev_priv->display.disable_fbc(dev);
5060 serge 2605
	intel_increase_pllclock(dev, to_intel_crtc(crtc)->pipe);
3031 serge 2606
 
5060 serge 2607
	dev_priv->display.update_primary_plane(crtc, fb, x, y);
2608
 
2609
	return 0;
3031 serge 2610
}
2611
 
2612
#if 0
4104 Serge 2613
void intel_display_handle_reset(struct drm_device *dev)
2614
{
2615
	struct drm_i915_private *dev_priv = dev->dev_private;
2616
	struct drm_crtc *crtc;
2617
 
2618
	/*
2619
	 * Flips in the rings have been nuked by the reset,
2620
	 * so complete all pending flips so that user space
2621
	 * will get its events and not get stuck.
2622
	 *
2623
	 * Also update the base address of all primary
2624
	 * planes to the last fb to make sure we're
2625
	 * showing the correct fb after a reset.
2626
	 *
2627
	 * Need to make two loops over the crtcs so that we
2628
	 * don't try to grab a crtc mutex before the
2629
	 * pending_flip_queue really got woken up.
2630
	 */
2631
 
5060 serge 2632
	for_each_crtc(dev, crtc) {
4104 Serge 2633
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2634
		enum plane plane = intel_crtc->plane;
2635
 
2636
		intel_prepare_page_flip(dev, plane);
2637
		intel_finish_page_flip_plane(dev, plane);
2638
	}
2639
 
5060 serge 2640
	for_each_crtc(dev, crtc) {
4104 Serge 2641
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2642
 
5060 serge 2643
		drm_modeset_lock(&crtc->mutex, NULL);
4560 Serge 2644
		/*
2645
		 * FIXME: Once we have proper support for primary planes (and
2646
		 * disabling them without disabling the entire crtc) allow again
5060 serge 2647
		 * a NULL crtc->primary->fb.
4560 Serge 2648
		 */
5060 serge 2649
		if (intel_crtc->active && crtc->primary->fb)
2650
			dev_priv->display.update_primary_plane(crtc,
2651
							       crtc->primary->fb,
2652
							       crtc->x,
2653
							       crtc->y);
2654
		drm_modeset_unlock(&crtc->mutex);
4104 Serge 2655
	}
2656
}
2657
 
3031 serge 2658
static int
2659
intel_finish_fb(struct drm_framebuffer *old_fb)
2660
{
5060 serge 2661
	struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
3031 serge 2662
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2663
	bool was_interruptible = dev_priv->mm.interruptible;
2327 Serge 2664
	int ret;
2665
 
3031 serge 2666
	/* Big Hammer, we also need to ensure that any pending
2667
	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2668
	 * current scanout is retired before unpinning the old
2669
	 * framebuffer.
2670
	 *
2671
	 * This should only fail upon a hung GPU, in which case we
2672
	 * can safely continue.
2673
	 */
2674
	dev_priv->mm.interruptible = false;
2675
	ret = i915_gem_object_finish_gpu(obj);
2676
	dev_priv->mm.interruptible = was_interruptible;
2327 Serge 2677
 
3031 serge 2678
	return ret;
2327 Serge 2679
}
4104 Serge 2680
 
5060 serge 2681
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
4104 Serge 2682
{
2683
	struct drm_device *dev = crtc->dev;
5060 serge 2684
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 2685
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5060 serge 2686
	unsigned long flags;
2687
	bool pending;
4104 Serge 2688
 
5060 serge 2689
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
2690
	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
2691
		return false;
4104 Serge 2692
 
5060 serge 2693
	spin_lock_irqsave(&dev->event_lock, flags);
2694
	pending = to_intel_crtc(crtc)->unpin_work != NULL;
2695
	spin_unlock_irqrestore(&dev->event_lock, flags);
4104 Serge 2696
 
5060 serge 2697
	return pending;
4104 Serge 2698
}
3031 serge 2699
#endif
2327 Serge 2700
 
2701
static int
2702
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
3031 serge 2703
		    struct drm_framebuffer *fb)
2327 Serge 2704
{
2705
	struct drm_device *dev = crtc->dev;
3031 serge 2706
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 2707
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5060 serge 2708
	enum pipe pipe = intel_crtc->pipe;
2709
	struct drm_framebuffer *old_fb = crtc->primary->fb;
2710
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2711
	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
2342 Serge 2712
	int ret;
2327 Serge 2713
 
5060 serge 2714
 
2327 Serge 2715
	/* no fb bound */
3031 serge 2716
	if (!fb) {
2327 Serge 2717
		DRM_ERROR("No FB bound\n");
2718
		return 0;
2719
	}
2720
 
3746 Serge 2721
	if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
4104 Serge 2722
		DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
2723
			  plane_name(intel_crtc->plane),
3746 Serge 2724
				INTEL_INFO(dev)->num_pipes);
2327 Serge 2725
		return -EINVAL;
2726
	}
2727
 
2728
	mutex_lock(&dev->struct_mutex);
5060 serge 2729
	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
2730
	if (ret == 0)
2731
		i915_gem_track_fb(old_obj, obj,
2732
				  INTEL_FRONTBUFFER_PRIMARY(pipe));
2733
	mutex_unlock(&dev->struct_mutex);
4280 Serge 2734
    if (ret != 0) {
2735
       DRM_ERROR("pin & fence failed\n");
2736
       return ret;
2737
    }
2327 Serge 2738
 
4560 Serge 2739
	/*
2740
	 * Update pipe size and adjust fitter if needed: the reason for this is
2741
	 * that in compute_mode_changes we check the native mode (not the pfit
2742
	 * mode) to see if we can flip rather than do a full mode set. In the
2743
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
2744
	 * pfit state, we'll end up with a big fb scanned out into the wrong
2745
	 * sized surface.
2746
	 *
2747
	 * To fix this properly, we need to hoist the checks up into
2748
	 * compute_mode_changes (or above), check the actual pfit state and
2749
	 * whether the platform allows pfit disable with pipe active, and only
2750
	 * then update the pipesrc and pfit state, even on the flip path.
2751
	 */
5060 serge 2752
	if (i915.fastboot) {
4560 Serge 2753
		const struct drm_display_mode *adjusted_mode =
2754
			&intel_crtc->config.adjusted_mode;
2755
 
4280 Serge 2756
		I915_WRITE(PIPESRC(intel_crtc->pipe),
4560 Serge 2757
			   ((adjusted_mode->crtc_hdisplay - 1) << 16) |
2758
			   (adjusted_mode->crtc_vdisplay - 1));
4280 Serge 2759
		if (!intel_crtc->config.pch_pfit.enabled &&
2760
		    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
2761
		     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2762
			I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
2763
			I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
2764
			I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
2765
		}
4560 Serge 2766
		intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
2767
		intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
4280 Serge 2768
	}
3031 serge 2769
 
5060 serge 2770
	dev_priv->display.update_primary_plane(crtc, fb, x, y);
2327 Serge 2771
 
5060 serge 2772
	if (intel_crtc->active)
2773
		intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
2774
 
2775
	crtc->primary->fb = fb;
3031 serge 2776
	crtc->x = x;
2777
	crtc->y = y;
2778
 
2779
	if (old_fb) {
4104 Serge 2780
		if (intel_crtc->active && old_fb != fb)
3031 serge 2781
			intel_wait_for_vblank(dev, intel_crtc->pipe);
5060 serge 2782
		mutex_lock(&dev->struct_mutex);
2783
		intel_unpin_fb_obj(old_obj);
2784
		mutex_unlock(&dev->struct_mutex);
3031 serge 2785
	}
2786
 
5060 serge 2787
	mutex_lock(&dev->struct_mutex);
3031 serge 2788
	intel_update_fbc(dev);
2336 Serge 2789
	mutex_unlock(&dev->struct_mutex);
2327 Serge 2790
 
2336 Serge 2791
    return 0;
2327 Serge 2792
}
2793
 
2794
static void intel_fdi_normal_train(struct drm_crtc *crtc)
2795
{
2796
	struct drm_device *dev = crtc->dev;
2797
	struct drm_i915_private *dev_priv = dev->dev_private;
2798
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2799
	int pipe = intel_crtc->pipe;
2800
	u32 reg, temp;
2801
 
2802
	/* enable normal train */
2803
	reg = FDI_TX_CTL(pipe);
2804
	temp = I915_READ(reg);
2805
	if (IS_IVYBRIDGE(dev)) {
2806
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2807
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
2808
	} else {
2809
		temp &= ~FDI_LINK_TRAIN_NONE;
2810
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
2811
	}
2812
	I915_WRITE(reg, temp);
2813
 
2814
	reg = FDI_RX_CTL(pipe);
2815
	temp = I915_READ(reg);
2816
	if (HAS_PCH_CPT(dev)) {
2817
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2818
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2819
	} else {
2820
		temp &= ~FDI_LINK_TRAIN_NONE;
2821
		temp |= FDI_LINK_TRAIN_NONE;
2822
	}
2823
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2824
 
2825
	/* wait one idle pattern time */
2826
	POSTING_READ(reg);
2827
	udelay(1000);
2828
 
2829
	/* IVB wants error correction enabled */
2830
	if (IS_IVYBRIDGE(dev))
2831
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2832
			   FDI_FE_ERRC_ENABLE);
2833
}
2834
 
4280 Serge 2835
static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
4104 Serge 2836
{
4280 Serge 2837
	return crtc->base.enabled && crtc->active &&
2838
		crtc->config.has_pch_encoder;
4104 Serge 2839
}
2840
 
3243 Serge 2841
static void ivb_modeset_global_resources(struct drm_device *dev)
2327 Serge 2842
{
2843
	struct drm_i915_private *dev_priv = dev->dev_private;
3243 Serge 2844
	struct intel_crtc *pipe_B_crtc =
2845
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
2846
	struct intel_crtc *pipe_C_crtc =
2847
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
2848
	uint32_t temp;
2327 Serge 2849
 
4104 Serge 2850
	/*
2851
	 * When everything is off disable fdi C so that we could enable fdi B
2852
	 * with all lanes. Note that we don't care about enabled pipes without
2853
	 * an enabled pch encoder.
2854
	 */
2855
	if (!pipe_has_enabled_pch(pipe_B_crtc) &&
2856
	    !pipe_has_enabled_pch(pipe_C_crtc)) {
3243 Serge 2857
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
2858
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
2859
 
2860
		temp = I915_READ(SOUTH_CHICKEN1);
2861
		temp &= ~FDI_BC_BIFURCATION_SELECT;
2862
		DRM_DEBUG_KMS("disabling fdi C rx\n");
2863
		I915_WRITE(SOUTH_CHICKEN1, temp);
2864
	}
2327 Serge 2865
}
2866
 
2867
/* The FDI link training functions for ILK/Ibexpeak. */
2868
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2869
{
2870
    struct drm_device *dev = crtc->dev;
2871
    struct drm_i915_private *dev_priv = dev->dev_private;
2872
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2873
    int pipe = intel_crtc->pipe;
2874
    u32 reg, temp, tries;
2875
 
5060 serge 2876
	/* FDI needs bits from pipe first */
2327 Serge 2877
    assert_pipe_enabled(dev_priv, pipe);
2878
 
2879
    /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
2880
       for train result */
2881
    reg = FDI_RX_IMR(pipe);
2882
    temp = I915_READ(reg);
2883
    temp &= ~FDI_RX_SYMBOL_LOCK;
2884
    temp &= ~FDI_RX_BIT_LOCK;
2885
    I915_WRITE(reg, temp);
2886
    I915_READ(reg);
2887
    udelay(150);
2888
 
2889
    /* enable CPU FDI TX and PCH FDI RX */
2890
    reg = FDI_TX_CTL(pipe);
2891
    temp = I915_READ(reg);
4104 Serge 2892
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
2893
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2327 Serge 2894
    temp &= ~FDI_LINK_TRAIN_NONE;
2895
    temp |= FDI_LINK_TRAIN_PATTERN_1;
2896
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
2897
 
2898
    reg = FDI_RX_CTL(pipe);
2899
    temp = I915_READ(reg);
2900
    temp &= ~FDI_LINK_TRAIN_NONE;
2901
    temp |= FDI_LINK_TRAIN_PATTERN_1;
2902
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
2903
 
2904
    POSTING_READ(reg);
2905
    udelay(150);
2906
 
2907
    /* Ironlake workaround, enable clock pointer after FDI enable*/
2908
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2909
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2910
               FDI_RX_PHASE_SYNC_POINTER_EN);
2911
 
2912
    reg = FDI_RX_IIR(pipe);
2913
    for (tries = 0; tries < 5; tries++) {
2914
        temp = I915_READ(reg);
2915
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2916
 
2917
        if ((temp & FDI_RX_BIT_LOCK)) {
2918
            DRM_DEBUG_KMS("FDI train 1 done.\n");
2919
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2920
            break;
2921
        }
2922
    }
2923
    if (tries == 5)
2924
        DRM_ERROR("FDI train 1 fail!\n");
2925
 
2926
    /* Train 2 */
2927
    reg = FDI_TX_CTL(pipe);
2928
    temp = I915_READ(reg);
2929
    temp &= ~FDI_LINK_TRAIN_NONE;
2930
    temp |= FDI_LINK_TRAIN_PATTERN_2;
2931
    I915_WRITE(reg, temp);
2932
 
2933
    reg = FDI_RX_CTL(pipe);
2934
    temp = I915_READ(reg);
2935
    temp &= ~FDI_LINK_TRAIN_NONE;
2936
    temp |= FDI_LINK_TRAIN_PATTERN_2;
2937
    I915_WRITE(reg, temp);
2938
 
2939
    POSTING_READ(reg);
2940
    udelay(150);
2941
 
2942
    reg = FDI_RX_IIR(pipe);
2943
    for (tries = 0; tries < 5; tries++) {
2944
        temp = I915_READ(reg);
2945
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2946
 
2947
        if (temp & FDI_RX_SYMBOL_LOCK) {
2948
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2949
            DRM_DEBUG_KMS("FDI train 2 done.\n");
2950
            break;
2951
        }
2952
    }
2953
    if (tries == 5)
2954
        DRM_ERROR("FDI train 2 fail!\n");
2955
 
2956
    DRM_DEBUG_KMS("FDI train done\n");
2957
 
2958
}
2959
 
2342 Serge 2960
static const int snb_b_fdi_train_param[] = {
2327 Serge 2961
    FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2962
    FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2963
    FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2964
    FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2965
};
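
/*
 * Each entry above selects one voltage-swing / pre-emphasis combination
 * for FDI link training: gen6_fdi_link_train() walks the table once per
 * training pattern, while ivb_manual_fdi_link_train() indexes it with
 * j / 2 so that every combination is attempted twice before giving up.
 */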
2966
 
2967
/* The FDI link training functions for SNB/Cougarpoint. */
2968
static void gen6_fdi_link_train(struct drm_crtc *crtc)
2969
{
2970
    struct drm_device *dev = crtc->dev;
2971
    struct drm_i915_private *dev_priv = dev->dev_private;
2972
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2973
    int pipe = intel_crtc->pipe;
3031 serge 2974
	u32 reg, temp, i, retry;
2327 Serge 2975
 
2976
    /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
2977
       for train result */
2978
    reg = FDI_RX_IMR(pipe);
2979
    temp = I915_READ(reg);
2980
    temp &= ~FDI_RX_SYMBOL_LOCK;
2981
    temp &= ~FDI_RX_BIT_LOCK;
2982
    I915_WRITE(reg, temp);
2983
 
2984
    POSTING_READ(reg);
2985
    udelay(150);
2986
 
2987
    /* enable CPU FDI TX and PCH FDI RX */
2988
    reg = FDI_TX_CTL(pipe);
2989
    temp = I915_READ(reg);
4104 Serge 2990
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
2991
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2327 Serge 2992
    temp &= ~FDI_LINK_TRAIN_NONE;
2993
    temp |= FDI_LINK_TRAIN_PATTERN_1;
2994
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2995
    /* SNB-B */
2996
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2997
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
2998
 
3243 Serge 2999
	I915_WRITE(FDI_RX_MISC(pipe),
3000
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3001
 
2327 Serge 3002
    reg = FDI_RX_CTL(pipe);
3003
    temp = I915_READ(reg);
3004
    if (HAS_PCH_CPT(dev)) {
3005
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3006
        temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3007
    } else {
3008
        temp &= ~FDI_LINK_TRAIN_NONE;
3009
        temp |= FDI_LINK_TRAIN_PATTERN_1;
3010
    }
3011
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
3012
 
3013
    POSTING_READ(reg);
3014
    udelay(150);
3015
 
2342 Serge 3016
	for (i = 0; i < 4; i++) {
2327 Serge 3017
        reg = FDI_TX_CTL(pipe);
3018
        temp = I915_READ(reg);
3019
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3020
        temp |= snb_b_fdi_train_param[i];
3021
        I915_WRITE(reg, temp);
3022
 
3023
        POSTING_READ(reg);
3024
        udelay(500);
3025
 
3031 serge 3026
		for (retry = 0; retry < 5; retry++) {
2327 Serge 3027
			reg = FDI_RX_IIR(pipe);
3028
			temp = I915_READ(reg);
3029
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3030
			if (temp & FDI_RX_BIT_LOCK) {
3031
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3032
				DRM_DEBUG_KMS("FDI train 1 done.\n");
3033
				break;
3034
			}
3031 serge 3035
			udelay(50);
3036
		}
3037
		if (retry < 5)
3038
			break;
2327 Serge 3039
    }
3040
    if (i == 4)
3041
        DRM_ERROR("FDI train 1 fail!\n");
3042
 
3043
    /* Train 2 */
3044
    reg = FDI_TX_CTL(pipe);
3045
    temp = I915_READ(reg);
3046
    temp &= ~FDI_LINK_TRAIN_NONE;
3047
    temp |= FDI_LINK_TRAIN_PATTERN_2;
3048
    if (IS_GEN6(dev)) {
3049
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3050
        /* SNB-B */
3051
        temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3052
    }
3053
    I915_WRITE(reg, temp);
3054
 
3055
    reg = FDI_RX_CTL(pipe);
3056
    temp = I915_READ(reg);
3057
    if (HAS_PCH_CPT(dev)) {
3058
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3059
        temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3060
    } else {
3061
        temp &= ~FDI_LINK_TRAIN_NONE;
3062
        temp |= FDI_LINK_TRAIN_PATTERN_2;
3063
    }
3064
    I915_WRITE(reg, temp);
3065
 
3066
    POSTING_READ(reg);
3067
    udelay(150);
3068
 
2342 Serge 3069
	for (i = 0; i < 4; i++) {
2327 Serge 3070
        reg = FDI_TX_CTL(pipe);
3071
        temp = I915_READ(reg);
3072
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3073
        temp |= snb_b_fdi_train_param[i];
3074
        I915_WRITE(reg, temp);
3075
 
3076
        POSTING_READ(reg);
3077
        udelay(500);
3078
 
3031 serge 3079
		for (retry = 0; retry < 5; retry++) {
2327 Serge 3080
        reg = FDI_RX_IIR(pipe);
3081
        temp = I915_READ(reg);
3082
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3083
        if (temp & FDI_RX_SYMBOL_LOCK) {
3084
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3085
            DRM_DEBUG_KMS("FDI train 2 done.\n");
3086
            break;
3087
        }
3031 serge 3088
			udelay(50);
3089
		}
3090
		if (retry < 5)
3091
			break;
2327 Serge 3092
    }
3093
    if (i == 4)
3094
        DRM_ERROR("FDI train 2 fail!\n");
3095
 
3096
    DRM_DEBUG_KMS("FDI train done.\n");
3097
}
3098
 
3099
/* Manual link training for Ivy Bridge A0 parts */
3100
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
3101
{
3102
    struct drm_device *dev = crtc->dev;
3103
    struct drm_i915_private *dev_priv = dev->dev_private;
3104
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3105
    int pipe = intel_crtc->pipe;
4104 Serge 3106
	u32 reg, temp, i, j;
2327 Serge 3107
 
3108
    /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
3109
       for train result */
3110
    reg = FDI_RX_IMR(pipe);
3111
    temp = I915_READ(reg);
3112
    temp &= ~FDI_RX_SYMBOL_LOCK;
3113
    temp &= ~FDI_RX_BIT_LOCK;
3114
    I915_WRITE(reg, temp);
3115
 
3116
    POSTING_READ(reg);
3117
    udelay(150);
3118
 
3243 Serge 3119
	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
3120
		      I915_READ(FDI_RX_IIR(pipe)));
3121
 
4104 Serge 3122
	/* Try each vswing and preemphasis setting twice before moving on */
3123
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
3124
		/* disable first in case we need to retry */
3125
		reg = FDI_TX_CTL(pipe);
3126
		temp = I915_READ(reg);
3127
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
3128
		temp &= ~FDI_TX_ENABLE;
3129
		I915_WRITE(reg, temp);
3130
 
3131
		reg = FDI_RX_CTL(pipe);
3132
		temp = I915_READ(reg);
3133
		temp &= ~FDI_LINK_TRAIN_AUTO;
3134
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3135
		temp &= ~FDI_RX_ENABLE;
3136
		I915_WRITE(reg, temp);
3137
 
2327 Serge 3138
    /* enable CPU FDI TX and PCH FDI RX */
3139
    reg = FDI_TX_CTL(pipe);
3140
    temp = I915_READ(reg);
4104 Serge 3141
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
3142
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2327 Serge 3143
    temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
3144
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4104 Serge 3145
		temp |= snb_b_fdi_train_param[j/2];
2342 Serge 3146
	temp |= FDI_COMPOSITE_SYNC;
2327 Serge 3147
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
3148
 
3243 Serge 3149
	I915_WRITE(FDI_RX_MISC(pipe),
3150
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3151
 
2327 Serge 3152
    reg = FDI_RX_CTL(pipe);
3153
    temp = I915_READ(reg);
3154
    temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2342 Serge 3155
	temp |= FDI_COMPOSITE_SYNC;
2327 Serge 3156
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
3157
 
3158
    POSTING_READ(reg);
4104 Serge 3159
		udelay(1); /* should be 0.5us */
2327 Serge 3160
 
2342 Serge 3161
	for (i = 0; i < 4; i++) {
2327 Serge 3162
        reg = FDI_RX_IIR(pipe);
3163
        temp = I915_READ(reg);
3164
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3165
 
3166
        if (temp & FDI_RX_BIT_LOCK ||
3167
            (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
3168
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4104 Serge 3169
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
3170
					      i);
2327 Serge 3171
            break;
3172
        }
4104 Serge 3173
			udelay(1); /* should be 0.5us */
3174
		}
3175
		if (i == 4) {
3176
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
3177
			continue;
2327 Serge 3178
    }
3179
 
3180
    /* Train 2 */
3181
    reg = FDI_TX_CTL(pipe);
3182
    temp = I915_READ(reg);
3183
    temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3184
    temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
3185
    I915_WRITE(reg, temp);
3186
 
3187
    reg = FDI_RX_CTL(pipe);
3188
    temp = I915_READ(reg);
3189
    temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3190
    temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3191
    I915_WRITE(reg, temp);
3192
 
3193
    POSTING_READ(reg);
4104 Serge 3194
		udelay(2); /* should be 1.5us */
2327 Serge 3195
 
2342 Serge 3196
	for (i = 0; i < 4; i++) {
2327 Serge 3197
        reg = FDI_RX_IIR(pipe);
3198
        temp = I915_READ(reg);
3199
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3200
 
4104 Serge 3201
			if (temp & FDI_RX_SYMBOL_LOCK ||
3202
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
2327 Serge 3203
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4104 Serge 3204
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
3205
					      i);
3206
				goto train_done;
2327 Serge 3207
        }
4104 Serge 3208
			udelay(2); /* should be 1.5us */
2327 Serge 3209
    }
3210
    if (i == 4)
4104 Serge 3211
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
3212
	}
2327 Serge 3213
 
4104 Serge 3214
train_done:
2327 Serge 3215
    DRM_DEBUG_KMS("FDI train done.\n");
3216
}
3217
 
3031 serge 3218
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
2327 Serge 3219
{
3031 serge 3220
	struct drm_device *dev = intel_crtc->base.dev;
2327 Serge 3221
	struct drm_i915_private *dev_priv = dev->dev_private;
3222
	int pipe = intel_crtc->pipe;
3223
	u32 reg, temp;
3224
 
3225
 
3226
	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
3227
	reg = FDI_RX_CTL(pipe);
3228
	temp = I915_READ(reg);
4104 Serge 3229
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
3230
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
3480 Serge 3231
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
2327 Serge 3232
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
3233
 
3234
	POSTING_READ(reg);
3235
	udelay(200);
3236
 
3237
	/* Switch from Rawclk to PCDclk */
3238
	temp = I915_READ(reg);
3239
	I915_WRITE(reg, temp | FDI_PCDCLK);
3240
 
3241
	POSTING_READ(reg);
3242
	udelay(200);
3243
 
3244
	/* Enable CPU FDI TX PLL, always on for Ironlake */
3245
	reg = FDI_TX_CTL(pipe);
3246
	temp = I915_READ(reg);
3247
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
3248
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
3249
 
3250
		POSTING_READ(reg);
3251
		udelay(100);
3252
	}
3253
}
3254
 
3031 serge 3255
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
3256
{
3257
	struct drm_device *dev = intel_crtc->base.dev;
3258
	struct drm_i915_private *dev_priv = dev->dev_private;
3259
	int pipe = intel_crtc->pipe;
3260
	u32 reg, temp;
3261
 
3262
	/* Switch from PCDclk to Rawclk */
3263
	reg = FDI_RX_CTL(pipe);
3264
	temp = I915_READ(reg);
3265
	I915_WRITE(reg, temp & ~FDI_PCDCLK);
3266
 
3267
	/* Disable CPU FDI TX PLL */
3268
	reg = FDI_TX_CTL(pipe);
3269
	temp = I915_READ(reg);
3270
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3271
 
3272
	POSTING_READ(reg);
3273
	udelay(100);
3274
 
3275
	reg = FDI_RX_CTL(pipe);
3276
	temp = I915_READ(reg);
3277
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3278
 
3279
	/* Wait for the clocks to turn off. */
3280
	POSTING_READ(reg);
3281
	udelay(100);
3282
}
3283
 
2327 Serge 3284
static void ironlake_fdi_disable(struct drm_crtc *crtc)
3285
{
3286
	struct drm_device *dev = crtc->dev;
3287
	struct drm_i915_private *dev_priv = dev->dev_private;
3288
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3289
	int pipe = intel_crtc->pipe;
3290
	u32 reg, temp;
3291
 
3292
	/* disable CPU FDI tx and PCH FDI rx */
3293
	reg = FDI_TX_CTL(pipe);
3294
	temp = I915_READ(reg);
3295
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
3296
	POSTING_READ(reg);
3297
 
3298
	reg = FDI_RX_CTL(pipe);
3299
	temp = I915_READ(reg);
3300
	temp &= ~(0x7 << 16);
3480 Serge 3301
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
2327 Serge 3302
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
3303
 
3304
	POSTING_READ(reg);
3305
	udelay(100);
3306
 
3307
	/* Ironlake workaround, disable clock pointer after downing FDI */
5060 serge 3308
	if (HAS_PCH_IBX(dev))
2327 Serge 3309
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3310
 
3311
	/* still set train pattern 1 */
3312
	reg = FDI_TX_CTL(pipe);
3313
	temp = I915_READ(reg);
3314
	temp &= ~FDI_LINK_TRAIN_NONE;
3315
	temp |= FDI_LINK_TRAIN_PATTERN_1;
3316
	I915_WRITE(reg, temp);
3317
 
3318
	reg = FDI_RX_CTL(pipe);
3319
	temp = I915_READ(reg);
3320
	if (HAS_PCH_CPT(dev)) {
3321
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3322
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3323
	} else {
3324
		temp &= ~FDI_LINK_TRAIN_NONE;
3325
		temp |= FDI_LINK_TRAIN_PATTERN_1;
3326
	}
3327
	/* BPC in FDI rx is consistent with that in PIPECONF */
3328
	temp &= ~(0x07 << 16);
3480 Serge 3329
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
2327 Serge 3330
	I915_WRITE(reg, temp);
3331
 
3332
	POSTING_READ(reg);
3333
	udelay(100);
3334
}
3335
 
5060 serge 3336
bool intel_has_pending_fb_unpin(struct drm_device *dev)
2327 Serge 3337
{
5060 serge 3338
	struct intel_crtc *crtc;
2327 Serge 3339
 
5060 serge 3340
	/* Note that we don't need to be called with mode_config.lock here
3341
	 * as our list of CRTC objects is static for the lifetime of the
3342
	 * device and so cannot disappear as we iterate. Similarly, we can
3343
	 * happily treat the predicates as racy, atomic checks as userspace
3344
	 * cannot claim and pin a new fb without at least acquring the
3345
	 * struct_mutex and so serialising with us.
3346
	 */
3347
	for_each_intel_crtc(dev, crtc) {
3348
		if (atomic_read(&crtc->unpin_work_count) == 0)
3349
			continue;
2327 Serge 3350
 
5060 serge 3351
		if (crtc->unpin_work)
3352
			intel_wait_for_vblank(dev, crtc->pipe);
3031 serge 3353
 
5060 serge 3354
		return true;
3355
	}
3356
 
3357
	return false;
2327 Serge 3358
}
3359
 
3031 serge 3360
#if 0
5060 serge 3361
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2327 Serge 3362
{
3031 serge 3363
	struct drm_device *dev = crtc->dev;
3364
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 3365
 
5060 serge 3366
	if (crtc->primary->fb == NULL)
2327 Serge 3367
		return;
3368
 
3480 Serge 3369
	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
3370
 
5060 serge 3371
	WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
3372
				   !intel_crtc_has_pending_flip(crtc),
3373
				   60*HZ) == 0);
3031 serge 3374
 
3375
	mutex_lock(&dev->struct_mutex);
5060 serge 3376
	intel_finish_fb(crtc->primary->fb);
3031 serge 3377
	mutex_unlock(&dev->struct_mutex);
2327 Serge 3378
}
3031 serge 3379
#endif
2327 Serge 3380
 
3031 serge 3381
/* Program iCLKIP clock to the desired frequency */
3382
static void lpt_program_iclkip(struct drm_crtc *crtc)
3383
{
3384
	struct drm_device *dev = crtc->dev;
3385
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 3386
	int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
3031 serge 3387
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
3388
	u32 temp;
3389
 
3480 Serge 3390
	mutex_lock(&dev_priv->dpio_lock);
3391
 
3031 serge 3392
	/* It is necessary to ungate the pixclk gate prior to programming
3393
	 * the divisors, and gate it back when it is done.
3394
	 */
3395
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
3396
 
3397
	/* Disable SSCCTL */
3398
	intel_sbi_write(dev_priv, SBI_SSCCTL6,
3243 Serge 3399
			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
3400
				SBI_SSCCTL_DISABLE,
3401
			SBI_ICLK);
3031 serge 3402
 
3403
	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
4560 Serge 3404
	if (clock == 20000) {
3031 serge 3405
		auxdiv = 1;
3406
		divsel = 0x41;
3407
		phaseinc = 0x20;
3408
	} else {
3409
		/* The iCLK virtual clock root frequency is in MHz,
4560 Serge 3410
		 * but the adjusted_mode->crtc_clock in in KHz. To get the
3411
		 * divisors, it is necessary to divide one by another, so we
3031 serge 3412
		 * convert the virtual clock precision to KHz here for higher
3413
		 * precision.
3414
		 */
3415
		u32 iclk_virtual_root_freq = 172800 * 1000;
3416
		u32 iclk_pi_range = 64;
3417
		u32 desired_divisor, msb_divisor_value, pi_value;
3418
 
4560 Serge 3419
		desired_divisor = (iclk_virtual_root_freq / clock);
3031 serge 3420
		msb_divisor_value = desired_divisor / iclk_pi_range;
3421
		pi_value = desired_divisor % iclk_pi_range;
3422
 
3423
		auxdiv = 0;
3424
		divsel = msb_divisor_value - 2;
3425
		phaseinc = pi_value;
3426
	}
3427
 
3428
	/* This should not happen with any sane values */
3429
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
3430
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
3431
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
3432
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);
3433
 
3434
	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
4560 Serge 3435
			clock,
3031 serge 3436
			auxdiv,
3437
			divsel,
3438
			phasedir,
3439
			phaseinc);
3440
 
3441
	/* Program SSCDIVINTPHASE6 */
3243 Serge 3442
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3031 serge 3443
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
3444
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
3445
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
3446
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
3447
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
3448
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
3243 Serge 3449
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
3031 serge 3450
 
3451
	/* Program SSCAUXDIV */
3243 Serge 3452
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3031 serge 3453
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
3454
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
3243 Serge 3455
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
3031 serge 3456
 
3457
	/* Enable modulator and associated divider */
3243 Serge 3458
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3031 serge 3459
	temp &= ~SBI_SSCCTL_DISABLE;
3243 Serge 3460
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3031 serge 3461
 
3462
	/* Wait for initialization time */
3463
	udelay(24);
3464
 
3465
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
3480 Serge 3466
 
3467
	mutex_unlock(&dev_priv->dpio_lock);
3031 serge 3468
}
3469
 
4104 Serge 3470
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
3471
						enum pipe pch_transcoder)
3472
{
3473
	struct drm_device *dev = crtc->base.dev;
3474
	struct drm_i915_private *dev_priv = dev->dev_private;
3475
	enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
3476
 
3477
	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
3478
		   I915_READ(HTOTAL(cpu_transcoder)));
3479
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
3480
		   I915_READ(HBLANK(cpu_transcoder)));
3481
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
3482
		   I915_READ(HSYNC(cpu_transcoder)));
3483
 
3484
	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
3485
		   I915_READ(VTOTAL(cpu_transcoder)));
3486
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
3487
		   I915_READ(VBLANK(cpu_transcoder)));
3488
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
3489
		   I915_READ(VSYNC(cpu_transcoder)));
3490
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
3491
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
3492
}
3493
 
4280 Serge 3494
static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
3495
{
3496
	struct drm_i915_private *dev_priv = dev->dev_private;
3497
	uint32_t temp;
3498
 
3499
	temp = I915_READ(SOUTH_CHICKEN1);
3500
	if (temp & FDI_BC_BIFURCATION_SELECT)
3501
		return;
3502
 
3503
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
3504
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
3505
 
3506
	temp |= FDI_BC_BIFURCATION_SELECT;
3507
	DRM_DEBUG_KMS("enabling fdi C rx\n");
3508
	I915_WRITE(SOUTH_CHICKEN1, temp);
3509
	POSTING_READ(SOUTH_CHICKEN1);
3510
}
3511
 
3512
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
3513
{
3514
	struct drm_device *dev = intel_crtc->base.dev;
3515
	struct drm_i915_private *dev_priv = dev->dev_private;
3516
 
3517
	switch (intel_crtc->pipe) {
3518
	case PIPE_A:
3519
		break;
3520
	case PIPE_B:
3521
		if (intel_crtc->config.fdi_lanes > 2)
3522
			WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
3523
		else
3524
			cpt_enable_fdi_bc_bifurcation(dev);
3525
 
3526
		break;
3527
	case PIPE_C:
3528
		cpt_enable_fdi_bc_bifurcation(dev);
3529
 
3530
		break;
3531
	default:
3532
		BUG();
3533
	}
3534
}
3535
 
2327 Serge 3536
/*
3537
 * Enable PCH resources required for PCH ports:
3538
 *   - PCH PLLs
3539
 *   - FDI training & RX/TX
3540
 *   - update transcoder timings
3541
 *   - DP transcoding bits
3542
 *   - transcoder
3543
 */
3544
static void ironlake_pch_enable(struct drm_crtc *crtc)
3545
{
3546
	struct drm_device *dev = crtc->dev;
3547
	struct drm_i915_private *dev_priv = dev->dev_private;
3548
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3549
	int pipe = intel_crtc->pipe;
3031 serge 3550
	u32 reg, temp;
2327 Serge 3551
 
4104 Serge 3552
	assert_pch_transcoder_disabled(dev_priv, pipe);
3031 serge 3553
 
4280 Serge 3554
	if (IS_IVYBRIDGE(dev))
3555
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);
3556
 
3243 Serge 3557
	/* Write the TU size bits before fdi link training, so that error
3558
	 * detection works. */
3559
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
3560
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
3561
 
2327 Serge 3562
	/* For PCH output, training FDI link */
3563
	dev_priv->display.fdi_link_train(crtc);
3564
 
4104 Serge 3565
	/* We need to program the right clock selection before writing the pixel
3566
	 * mutliplier into the DPLL. */
3243 Serge 3567
	if (HAS_PCH_CPT(dev)) {
3031 serge 3568
		u32 sel;
2342 Serge 3569
 
2327 Serge 3570
		temp = I915_READ(PCH_DPLL_SEL);
4104 Serge 3571
		temp |= TRANS_DPLL_ENABLE(pipe);
3572
		sel = TRANS_DPLLB_SEL(pipe);
3573
		if (intel_crtc->config.shared_dpll == DPLL_ID_PCH_PLL_B)
3031 serge 3574
			temp |= sel;
3575
		else
3576
			temp &= ~sel;
2327 Serge 3577
		I915_WRITE(PCH_DPLL_SEL, temp);
3578
	}
3579
 
4104 Serge 3580
	/* XXX: pch pll's can be enabled any time before we enable the PCH
3581
	 * transcoder, and we actually should do this to not upset any PCH
3582
	 * transcoder that already use the clock when we share it.
3583
	 *
3584
	 * Note that enable_shared_dpll tries to do the right thing, but
3585
	 * get_shared_dpll unconditionally resets the pll - we need that to have
3586
	 * the right LVDS enable sequence. */
5060 serge 3587
	intel_enable_shared_dpll(intel_crtc);
4104 Serge 3588
 
2327 Serge 3589
	/* set transcoder timing, panel must allow it */
3590
	assert_panel_unlocked(dev_priv, pipe);
4104 Serge 3591
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
2327 Serge 3592
 
3593
	intel_fdi_normal_train(crtc);
3594
 
3595
	/* For PCH DP, enable TRANS_DP_CTL */
3596
	if (HAS_PCH_CPT(dev) &&
2342 Serge 3597
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
3598
	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
3480 Serge 3599
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
2327 Serge 3600
		reg = TRANS_DP_CTL(pipe);
3601
		temp = I915_READ(reg);
3602
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
3603
			  TRANS_DP_SYNC_MASK |
3604
			  TRANS_DP_BPC_MASK);
3605
		temp |= (TRANS_DP_OUTPUT_ENABLE |
3606
			 TRANS_DP_ENH_FRAMING);
3607
		temp |= bpc << 9; /* same format but at 11:9 */
3608
 
3609
		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
3610
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
3611
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
3612
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
3613
 
3614
		switch (intel_trans_dp_port_sel(crtc)) {
3615
		case PCH_DP_B:
3616
			temp |= TRANS_DP_PORT_SEL_B;
3617
			break;
3618
		case PCH_DP_C:
3619
			temp |= TRANS_DP_PORT_SEL_C;
3620
			break;
3621
		case PCH_DP_D:
3622
			temp |= TRANS_DP_PORT_SEL_D;
3623
			break;
3624
		default:
3243 Serge 3625
			BUG();
2327 Serge 3626
		}
3627
 
3628
		I915_WRITE(reg, temp);
3629
	}
3630
 
3243 Serge 3631
	ironlake_enable_pch_transcoder(dev_priv, pipe);
2327 Serge 3632
}
3633
 
3243 Serge 3634
static void lpt_pch_enable(struct drm_crtc *crtc)
3635
{
3636
	struct drm_device *dev = crtc->dev;
3637
	struct drm_i915_private *dev_priv = dev->dev_private;
3638
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3746 Serge 3639
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
3243 Serge 3640
 
4104 Serge 3641
	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
3243 Serge 3642
 
3643
	lpt_program_iclkip(crtc);
3644
 
3645
	/* Set transcoder timing. */
4104 Serge 3646
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
3243 Serge 3647
 
3648
	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
3649
}
3650
 
5060 serge 3651
void intel_put_shared_dpll(struct intel_crtc *crtc)
3031 serge 3652
{
4104 Serge 3653
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3031 serge 3654
 
3655
	if (pll == NULL)
3656
		return;
3657
 
3658
	if (pll->refcount == 0) {
4104 Serge 3659
		WARN(1, "bad %s refcount\n", pll->name);
3031 serge 3660
		return;
3661
	}
3662
 
4104 Serge 3663
	if (--pll->refcount == 0) {
3664
		WARN_ON(pll->on);
3665
		WARN_ON(pll->active);
3666
	}
3667
 
3668
	crtc->config.shared_dpll = DPLL_ID_PRIVATE;
3031 serge 3669
}
3670
 
5060 serge 3671
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
3031 serge 3672
{
4104 Serge 3673
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
3674
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3675
	enum intel_dpll_id i;
3031 serge 3676
 
3677
	if (pll) {
4104 Serge 3678
		DRM_DEBUG_KMS("CRTC:%d dropping existing %s\n",
3679
			      crtc->base.base.id, pll->name);
3680
		intel_put_shared_dpll(crtc);
3031 serge 3681
	}
3682
 
3683
	if (HAS_PCH_IBX(dev_priv->dev)) {
3684
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
4104 Serge 3685
		i = (enum intel_dpll_id) crtc->pipe;
3686
		pll = &dev_priv->shared_dplls[i];
3031 serge 3687
 
4104 Serge 3688
		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
3689
			      crtc->base.base.id, pll->name);
3031 serge 3690
 
5060 serge 3691
		WARN_ON(pll->refcount);
3692
 
3031 serge 3693
		goto found;
3694
	}
3695
 
4104 Serge 3696
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3697
		pll = &dev_priv->shared_dplls[i];
3031 serge 3698
 
3699
		/* Only want to check enabled timings first */
3700
		if (pll->refcount == 0)
3701
			continue;
3702
 
4104 Serge 3703
		if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
3704
			   sizeof(pll->hw_state)) == 0) {
3705
			DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, ative %d)\n",
3706
				      crtc->base.base.id,
3707
				      pll->name, pll->refcount, pll->active);
3031 serge 3708
 
3709
			goto found;
3710
		}
3711
	}
3712
 
3713
	/* Ok no matching timings, maybe there's a free one? */
4104 Serge 3714
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3715
		pll = &dev_priv->shared_dplls[i];
3031 serge 3716
		if (pll->refcount == 0) {
4104 Serge 3717
			DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
3718
				      crtc->base.base.id, pll->name);
3031 serge 3719
			goto found;
3720
		}
3721
	}
3722
 
3723
	return NULL;
3724
 
3725
found:
5060 serge 3726
	if (pll->refcount == 0)
3727
		pll->hw_state = crtc->config.dpll_hw_state;
3728
 
4104 Serge 3729
	crtc->config.shared_dpll = i;
3730
	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
3731
			 pipe_name(crtc->pipe));
3732
 
3031 serge 3733
	pll->refcount++;
3734
 
3735
	return pll;
3736
}
3737
 
4104 Serge 3738
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
2342 Serge 3739
{
3740
	struct drm_i915_private *dev_priv = dev->dev_private;
3243 Serge 3741
	int dslreg = PIPEDSL(pipe);
2342 Serge 3742
	u32 temp;
3743
 
3744
	temp = I915_READ(dslreg);
3745
	udelay(500);
3746
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
3747
		if (wait_for(I915_READ(dslreg) != temp, 5))
4104 Serge 3748
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
2342 Serge 3749
	}
3750
}
3751
 
4104 Serge 3752
static void ironlake_pfit_enable(struct intel_crtc *crtc)
3753
{
3754
	struct drm_device *dev = crtc->base.dev;
3755
	struct drm_i915_private *dev_priv = dev->dev_private;
3756
	int pipe = crtc->pipe;
3757
 
3758
	if (crtc->config.pch_pfit.enabled) {
3759
		/* Force use of hard-coded filter coefficients
3760
		 * as some pre-programmed values are broken,
3761
		 * e.g. x201.
3762
		 */
3763
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
3764
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
3765
						 PF_PIPE_SEL_IVB(pipe));
3766
		else
3767
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
3768
		I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos);
3769
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size);
3770
	}
3771
}
3772
 
3773
static void intel_enable_planes(struct drm_crtc *crtc)
3774
{
3775
	struct drm_device *dev = crtc->dev;
3776
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
5060 serge 3777
	struct drm_plane *plane;
4104 Serge 3778
	struct intel_plane *intel_plane;
3779
 
5060 serge 3780
	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
3781
		intel_plane = to_intel_plane(plane);
4104 Serge 3782
		if (intel_plane->pipe == pipe)
3783
			intel_plane_restore(&intel_plane->base);
5060 serge 3784
	}
4104 Serge 3785
}
3786
 
3787
static void intel_disable_planes(struct drm_crtc *crtc)
3788
{
3789
	struct drm_device *dev = crtc->dev;
3790
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
5060 serge 3791
	struct drm_plane *plane;
4104 Serge 3792
	struct intel_plane *intel_plane;
3793
 
5060 serge 3794
	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
3795
		intel_plane = to_intel_plane(plane);
4104 Serge 3796
		if (intel_plane->pipe == pipe)
3797
			intel_plane_disable(&intel_plane->base);
5060 serge 3798
	}
4104 Serge 3799
}
3800
 
4560 Serge 3801
void hsw_enable_ips(struct intel_crtc *crtc)
3802
{
5060 serge 3803
	struct drm_device *dev = crtc->base.dev;
3804
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 3805
 
3806
	if (!crtc->config.ips_enabled)
3807
		return;
3808
 
5060 serge 3809
	/* We can only enable IPS after we enable a plane and wait for a vblank */
3810
	intel_wait_for_vblank(dev, crtc->pipe);
3811
 
4560 Serge 3812
	assert_plane_enabled(dev_priv, crtc->plane);
5060 serge 3813
	if (IS_BROADWELL(dev)) {
4560 Serge 3814
		mutex_lock(&dev_priv->rps.hw_lock);
3815
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
3816
		mutex_unlock(&dev_priv->rps.hw_lock);
3817
		/* Quoting Art Runyan: "its not safe to expect any particular
3818
		 * value in IPS_CTL bit 31 after enabling IPS through the
3819
		 * mailbox." Moreover, the mailbox may return a bogus state,
3820
		 * so we need to just enable it and continue on.
3821
		 */
3822
	} else {
3823
		I915_WRITE(IPS_CTL, IPS_ENABLE);
3824
		/* The bit only becomes 1 in the next vblank, so this wait here
3825
		 * is essentially intel_wait_for_vblank. If we don't have this
3826
		 * and don't wait for vblanks until the end of crtc_enable, then
3827
		 * the HW state readout code will complain that the expected
3828
		 * IPS_CTL value is not the one we read. */
3829
		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
3830
			DRM_ERROR("Timed out waiting for IPS enable\n");
3831
	}
3832
}
3833
 
3834
void hsw_disable_ips(struct intel_crtc *crtc)
3835
{
3836
	struct drm_device *dev = crtc->base.dev;
3837
	struct drm_i915_private *dev_priv = dev->dev_private;
3838
 
3839
	if (!crtc->config.ips_enabled)
3840
		return;
3841
 
3842
	assert_plane_enabled(dev_priv, crtc->plane);
5060 serge 3843
	if (IS_BROADWELL(dev)) {
4560 Serge 3844
		mutex_lock(&dev_priv->rps.hw_lock);
3845
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
3846
		mutex_unlock(&dev_priv->rps.hw_lock);
5060 serge 3847
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
3848
		if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
3849
			DRM_ERROR("Timed out waiting for IPS disable\n");
4560 Serge 3850
	} else {
3851
		I915_WRITE(IPS_CTL, 0);
3852
		POSTING_READ(IPS_CTL);
3853
	}
3854
 
3855
	/* We need to wait for a vblank before we can disable the plane. */
3856
	intel_wait_for_vblank(dev, crtc->pipe);
3857
}
3858
 
3859
/** Loads the palette/gamma unit for the CRTC with the prepared values */
3860
static void intel_crtc_load_lut(struct drm_crtc *crtc)
3861
{
3862
	struct drm_device *dev = crtc->dev;
3863
	struct drm_i915_private *dev_priv = dev->dev_private;
3864
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3865
	enum pipe pipe = intel_crtc->pipe;
3866
	int palreg = PALETTE(pipe);
3867
	int i;
3868
	bool reenable_ips = false;
3869
 
3870
	/* The clocks have to be on to load the palette. */
3871
	if (!crtc->enabled || !intel_crtc->active)
3872
		return;
3873
 
3874
	if (!HAS_PCH_SPLIT(dev_priv->dev)) {
3875
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
3876
			assert_dsi_pll_enabled(dev_priv);
3877
		else
3878
			assert_pll_enabled(dev_priv, pipe);
3879
	}
3880
 
3881
	/* use legacy palette for Ironlake */
5060 serge 3882
	if (!HAS_GMCH_DISPLAY(dev))
4560 Serge 3883
		palreg = LGC_PALETTE(pipe);
3884
 
3885
	/* Workaround : Do not read or write the pipe palette/gamma data while
3886
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
3887
	 */
3888
	if (IS_HASWELL(dev) && intel_crtc->config.ips_enabled &&
3889
	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
3890
	     GAMMA_MODE_MODE_SPLIT)) {
3891
		hsw_disable_ips(intel_crtc);
3892
		reenable_ips = true;
3893
	}
3894
 
3895
	for (i = 0; i < 256; i++) {
3896
		I915_WRITE(palreg + 4 * i,
3897
			   (intel_crtc->lut_r[i] << 16) |
3898
			   (intel_crtc->lut_g[i] << 8) |
3899
			   intel_crtc->lut_b[i]);
3900
	}
3901
 
3902
	if (reenable_ips)
3903
		hsw_enable_ips(intel_crtc);
3904
}
3905
 
5060 serge 3906
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3907
{
3908
	if (!enable && intel_crtc->overlay) {
3909
		struct drm_device *dev = intel_crtc->base.dev;
3910
		struct drm_i915_private *dev_priv = dev->dev_private;
3911
 
3912
		mutex_lock(&dev->struct_mutex);
3913
		dev_priv->mm.interruptible = false;
3914
        dev_priv->mm.interruptible = true;
3915
		mutex_unlock(&dev->struct_mutex);
3916
	}
3917
 
3918
	/* Let userspace switch the overlay on again. In most cases userspace
3919
	 * has to recompute where to put it anyway.
3920
	 */
3921
}
3922
 
3923
static void intel_crtc_enable_planes(struct drm_crtc *crtc)
3924
{
3925
	struct drm_device *dev = crtc->dev;
3926
	struct drm_i915_private *dev_priv = dev->dev_private;
3927
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3928
	int pipe = intel_crtc->pipe;
3929
	int plane = intel_crtc->plane;
3930
 
3931
	drm_vblank_on(dev, pipe);
3932
 
3933
	intel_enable_primary_hw_plane(dev_priv, plane, pipe);
3934
	intel_enable_planes(crtc);
3935
	intel_crtc_update_cursor(crtc, true);
3936
	intel_crtc_dpms_overlay(intel_crtc, true);
3937
 
3938
	hsw_enable_ips(intel_crtc);
3939
 
3940
	mutex_lock(&dev->struct_mutex);
3941
	intel_update_fbc(dev);
3942
	mutex_unlock(&dev->struct_mutex);
3943
}
3944
 
3945
static void intel_crtc_disable_planes(struct drm_crtc *crtc)
3946
{
3947
	struct drm_device *dev = crtc->dev;
3948
	struct drm_i915_private *dev_priv = dev->dev_private;
3949
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3950
	int pipe = intel_crtc->pipe;
3951
	int plane = intel_crtc->plane;
3952
 
3953
 
3954
	if (dev_priv->fbc.plane == plane)
3955
		intel_disable_fbc(dev);
3956
 
3957
	hsw_disable_ips(intel_crtc);
3958
 
3959
	intel_crtc_dpms_overlay(intel_crtc, false);
3960
	intel_crtc_update_cursor(crtc, false);
3961
	intel_disable_planes(crtc);
3962
	intel_disable_primary_hw_plane(dev_priv, plane, pipe);
3963
	drm_vblank_off(dev, pipe);
3964
}
3965
 
2327 Serge 3966
static void ironlake_crtc_enable(struct drm_crtc *crtc)
3967
{
3968
    struct drm_device *dev = crtc->dev;
3969
    struct drm_i915_private *dev_priv = dev->dev_private;
3970
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031 serge 3971
	struct intel_encoder *encoder;
2327 Serge 3972
    int pipe = intel_crtc->pipe;
5060 serge 3973
	enum plane plane = intel_crtc->plane;
2327 Serge 3974
 
3031 serge 3975
	WARN_ON(!crtc->enabled);
3976
 
2327 Serge 3977
    if (intel_crtc->active)
3978
        return;
3979
 
5060 serge 3980
	if (intel_crtc->config.has_pch_encoder)
3981
		intel_prepare_shared_dpll(intel_crtc);
3982
 
3983
	if (intel_crtc->config.has_dp_encoder)
3984
		intel_dp_set_m_n(intel_crtc);
3985
 
3986
	intel_set_pipe_timings(intel_crtc);
3987
 
3988
	if (intel_crtc->config.has_pch_encoder) {
3989
		intel_cpu_transcoder_set_m_n(intel_crtc,
3990
					     &intel_crtc->config.fdi_m_n);
3991
	}
3992
 
3993
	ironlake_set_pipeconf(crtc);
3994
 
3995
	/* Set up the display plane register */
3996
	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
3997
	POSTING_READ(DSPCNTR(plane));
3998
 
3999
	dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
4000
					       crtc->x, crtc->y);
4001
 
2327 Serge 4002
    intel_crtc->active = true;
4104 Serge 4003
 
4004
	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4005
	intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
4006
 
4007
	for_each_encoder_on_crtc(dev, crtc, encoder)
4008
		if (encoder->pre_enable)
4009
			encoder->pre_enable(encoder);
2327 Serge 4010
 
3746 Serge 4011
	if (intel_crtc->config.has_pch_encoder) {
3243 Serge 4012
		/* Note: FDI PLL enabling _must_ be done before we enable the
4013
		 * cpu pipes, hence this is separate from all the other fdi/pch
4014
		 * enabling. */
3031 serge 4015
		ironlake_fdi_pll_enable(intel_crtc);
4016
	} else {
4017
		assert_fdi_tx_disabled(dev_priv, pipe);
4018
		assert_fdi_rx_disabled(dev_priv, pipe);
4019
	}
2327 Serge 4020
 
4104 Serge 4021
	ironlake_pfit_enable(intel_crtc);
3031 serge 4022
 
2327 Serge 4023
    /*
4024
     * On ILK+ LUT must be loaded before the pipe is running but with
4025
     * clocks enabled
4026
     */
4027
    intel_crtc_load_lut(crtc);
4028
 
4560 Serge 4029
	intel_update_watermarks(crtc);
5060 serge 4030
	intel_enable_pipe(intel_crtc);
2327 Serge 4031
 
3746 Serge 4032
	if (intel_crtc->config.has_pch_encoder)
2327 Serge 4033
        ironlake_pch_enable(crtc);
4034
 
3031 serge 4035
	for_each_encoder_on_crtc(dev, crtc, encoder)
4036
		encoder->enable(encoder);
4037
 
4038
	if (HAS_PCH_CPT(dev))
4104 Serge 4039
		cpt_verify_modeset(dev, intel_crtc->pipe);
3031 serge 4040
 
5060 serge 4041
	intel_crtc_enable_planes(crtc);
2327 Serge 4042
}
4043
 
4104 Serge 4044
/* IPS only exists on ULT machines and is tied to pipe A. */
4045
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4046
{
4047
	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
4048
}
4049
 
4560 Serge 4050
/*
4051
 * This implements the workaround described in the "notes" section of the mode
4052
 * set sequence documentation. When going from no pipes or single pipe to
4053
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
4054
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
4055
 */
4056
static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
4057
{
4058
	struct drm_device *dev = crtc->base.dev;
4059
	struct intel_crtc *crtc_it, *other_active_crtc = NULL;
4060
 
4061
	/* We want to get the other_active_crtc only if there's only 1 other
4062
	 * active crtc. */
5060 serge 4063
	for_each_intel_crtc(dev, crtc_it) {
4560 Serge 4064
		if (!crtc_it->active || crtc_it == crtc)
4065
			continue;
4066
 
4067
		if (other_active_crtc)
4104 Serge 4068
		return;
4069
 
4560 Serge 4070
		other_active_crtc = crtc_it;
4071
	}
4072
	if (!other_active_crtc)
4073
		return;
4104 Serge 4074
 
4560 Serge 4075
	intel_wait_for_vblank(dev, other_active_crtc->pipe);
4076
	intel_wait_for_vblank(dev, other_active_crtc->pipe);
4104 Serge 4077
}
4078
 
3243 Serge 4079
static void haswell_crtc_enable(struct drm_crtc *crtc)
4080
{
4081
	struct drm_device *dev = crtc->dev;
4082
	struct drm_i915_private *dev_priv = dev->dev_private;
4083
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4084
	struct intel_encoder *encoder;
4085
	int pipe = intel_crtc->pipe;
5060 serge 4086
	enum plane plane = intel_crtc->plane;
3243 Serge 4087
 
4088
	WARN_ON(!crtc->enabled);
4089
 
4090
	if (intel_crtc->active)
4091
		return;
4092
 
5060 serge 4093
	if (intel_crtc_to_shared_dpll(intel_crtc))
4094
		intel_enable_shared_dpll(intel_crtc);
4095
 
4096
	if (intel_crtc->config.has_dp_encoder)
4097
		intel_dp_set_m_n(intel_crtc);
4098
 
4099
	intel_set_pipe_timings(intel_crtc);
4100
 
4101
	if (intel_crtc->config.has_pch_encoder) {
4102
		intel_cpu_transcoder_set_m_n(intel_crtc,
4103
					     &intel_crtc->config.fdi_m_n);
4104
	}
4105
 
4106
	haswell_set_pipeconf(crtc);
4107
 
4108
	intel_set_pipe_csc(crtc);
4109
 
4110
	/* Set up the display plane register */
4111
	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
4112
	POSTING_READ(DSPCNTR(plane));
4113
 
4114
	dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
4115
					       crtc->x, crtc->y);
4116
 
3243 Serge 4117
	intel_crtc->active = true;
4104 Serge 4118
 
4119
	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
3243 Serge 4120
	for_each_encoder_on_crtc(dev, crtc, encoder)
4121
		if (encoder->pre_enable)
4122
			encoder->pre_enable(encoder);
4123
 
5060 serge 4124
	if (intel_crtc->config.has_pch_encoder) {
4125
		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
4126
		dev_priv->display.fdi_link_train(crtc);
4127
	}
4128
 
3243 Serge 4129
	intel_ddi_enable_pipe_clock(intel_crtc);
4130
 
4104 Serge 4131
	ironlake_pfit_enable(intel_crtc);
3243 Serge 4132
 
4133
	/*
4134
	 * On ILK+ LUT must be loaded before the pipe is running but with
4135
	 * clocks enabled
4136
	 */
4137
	intel_crtc_load_lut(crtc);
4138
 
4139
	intel_ddi_set_pipe_settings(crtc);
3746 Serge 4140
	intel_ddi_enable_transcoder_func(crtc);
3243 Serge 4141
 
4560 Serge 4142
	intel_update_watermarks(crtc);
5060 serge 4143
	intel_enable_pipe(intel_crtc);
3243 Serge 4144
 
3746 Serge 4145
	if (intel_crtc->config.has_pch_encoder)
3243 Serge 4146
		lpt_pch_enable(crtc);
4147
 
5060 serge 4148
	if (intel_crtc->config.dp_encoder_is_mst)
4149
		intel_ddi_set_vc_payload_alloc(crtc, true);
4150
 
4560 Serge 4151
	for_each_encoder_on_crtc(dev, crtc, encoder) {
3243 Serge 4152
		encoder->enable(encoder);
4560 Serge 4153
		intel_opregion_notify_encoder(encoder, true);
4154
	}
3243 Serge 4155
 
4560 Serge 4156
	/* If we change the relative order between pipe/planes enabling, we need
4157
	 * to change the workaround. */
4158
	haswell_mode_set_planes_workaround(intel_crtc);
5060 serge 4159
	intel_crtc_enable_planes(crtc);
3243 Serge 4160
}
4161
 
4104 Serge 4162
static void ironlake_pfit_disable(struct intel_crtc *crtc)
4163
{
4164
	struct drm_device *dev = crtc->base.dev;
4165
	struct drm_i915_private *dev_priv = dev->dev_private;
4166
	int pipe = crtc->pipe;
4167
 
4168
	/* To avoid upsetting the power well on haswell only disable the pfit if
4169
	 * it's in use. The hw state code will make sure we get this right. */
4170
	if (crtc->config.pch_pfit.enabled) {
4171
		I915_WRITE(PF_CTL(pipe), 0);
4172
		I915_WRITE(PF_WIN_POS(pipe), 0);
4173
		I915_WRITE(PF_WIN_SZ(pipe), 0);
4174
	}
4175
}
4176
 
2327 Serge 4177
static void ironlake_crtc_disable(struct drm_crtc *crtc)
4178
{
4179
    struct drm_device *dev = crtc->dev;
4180
    struct drm_i915_private *dev_priv = dev->dev_private;
4181
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031 serge 4182
	struct intel_encoder *encoder;
2327 Serge 4183
    int pipe = intel_crtc->pipe;
4184
    u32 reg, temp;
4185
 
4186
    if (!intel_crtc->active)
4187
        return;
4188
 
5060 serge 4189
	intel_crtc_disable_planes(crtc);
4190
 
3031 serge 4191
	for_each_encoder_on_crtc(dev, crtc, encoder)
4192
		encoder->disable(encoder);
2336 Serge 4193
 
4104 Serge 4194
	if (intel_crtc->config.has_pch_encoder)
4195
		intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
2327 Serge 4196
 
4197
    intel_disable_pipe(dev_priv, pipe);
4104 Serge 4198
	ironlake_pfit_disable(intel_crtc);
2327 Serge 4199
 
3031 serge 4200
	for_each_encoder_on_crtc(dev, crtc, encoder)
4201
		if (encoder->post_disable)
4202
			encoder->post_disable(encoder);
4203
 
4104 Serge 4204
	if (intel_crtc->config.has_pch_encoder) {
2327 Serge 4205
    ironlake_fdi_disable(crtc);
4206
 
3243 Serge 4207
	ironlake_disable_pch_transcoder(dev_priv, pipe);
4104 Serge 4208
		intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
2327 Serge 4209
 
4210
    if (HAS_PCH_CPT(dev)) {
4211
        /* disable TRANS_DP_CTL */
4212
        reg = TRANS_DP_CTL(pipe);
4213
        temp = I915_READ(reg);
4104 Serge 4214
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
4215
				  TRANS_DP_PORT_SEL_MASK);
2327 Serge 4216
        temp |= TRANS_DP_PORT_SEL_NONE;
4217
        I915_WRITE(reg, temp);
4218
 
4219
        /* disable DPLL_SEL */
4220
        temp = I915_READ(PCH_DPLL_SEL);
4104 Serge 4221
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
2327 Serge 4222
        I915_WRITE(PCH_DPLL_SEL, temp);
4223
    }
4224
 
4225
    /* disable PCH DPLL */
4104 Serge 4226
		intel_disable_shared_dpll(intel_crtc);
2327 Serge 4227
 
3031 serge 4228
	ironlake_fdi_pll_disable(intel_crtc);
4104 Serge 4229
	}
2327 Serge 4230
 
4231
    intel_crtc->active = false;
4560 Serge 4232
	intel_update_watermarks(crtc);
2327 Serge 4233
 
4234
    mutex_lock(&dev->struct_mutex);
4235
    intel_update_fbc(dev);
4236
    mutex_unlock(&dev->struct_mutex);
4237
}
4238
 
3243 Serge 4239
static void haswell_crtc_disable(struct drm_crtc *crtc)
4240
{
4241
	struct drm_device *dev = crtc->dev;
4242
	struct drm_i915_private *dev_priv = dev->dev_private;
4243
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4244
	struct intel_encoder *encoder;
4245
	int pipe = intel_crtc->pipe;
3746 Serge 4246
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
3243 Serge 4247
 
4248
	if (!intel_crtc->active)
4249
		return;
4250
 
5060 serge 4251
	intel_crtc_disable_planes(crtc);
4560 Serge 4252
 
4253
	for_each_encoder_on_crtc(dev, crtc, encoder) {
4254
		intel_opregion_notify_encoder(encoder, false);
3243 Serge 4255
		encoder->disable(encoder);
4560 Serge 4256
	}
3243 Serge 4257
 
4104 Serge 4258
	if (intel_crtc->config.has_pch_encoder)
4259
		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
3243 Serge 4260
	intel_disable_pipe(dev_priv, pipe);
4261
 
5097 serge 4262
	if (intel_crtc->config.dp_encoder_is_mst)
4263
		intel_ddi_set_vc_payload_alloc(crtc, false);
4264
 
3243 Serge 4265
	intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
4266
 
4104 Serge 4267
	ironlake_pfit_disable(intel_crtc);
3243 Serge 4268
 
4269
	intel_ddi_disable_pipe_clock(intel_crtc);
4270
 
3746 Serge 4271
	if (intel_crtc->config.has_pch_encoder) {
3243 Serge 4272
		lpt_disable_pch_transcoder(dev_priv);
4104 Serge 4273
		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
3243 Serge 4274
		intel_ddi_fdi_disable(crtc);
4275
	}
4276
 
5060 serge 4277
	for_each_encoder_on_crtc(dev, crtc, encoder)
4278
		if (encoder->post_disable)
4279
			encoder->post_disable(encoder);
4280
 
3243 Serge 4281
	intel_crtc->active = false;
4560 Serge 4282
	intel_update_watermarks(crtc);
3243 Serge 4283
 
4284
	mutex_lock(&dev->struct_mutex);
4285
	intel_update_fbc(dev);
4286
	mutex_unlock(&dev->struct_mutex);
5060 serge 4287
 
4288
	if (intel_crtc_to_shared_dpll(intel_crtc))
4289
		intel_disable_shared_dpll(intel_crtc);
3243 Serge 4290
}
4291
 
3031 serge 4292
static void ironlake_crtc_off(struct drm_crtc *crtc)
2327 Serge 4293
{
4294
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4104 Serge 4295
	intel_put_shared_dpll(intel_crtc);
2327 Serge 4296
}
4297
 
3243 Serge 4298
 
4104 Serge 4299
static void i9xx_pfit_enable(struct intel_crtc *crtc)
4300
{
4301
	struct drm_device *dev = crtc->base.dev;
4302
	struct drm_i915_private *dev_priv = dev->dev_private;
4303
	struct intel_crtc_config *pipe_config = &crtc->config;
4304
 
4305
	if (!crtc->config.gmch_pfit.control)
4306
		return;
4307
 
4308
	/*
4309
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
4310
	 * according to register description and PRM.
4311
	 */
4312
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
4313
	assert_pipe_disabled(dev_priv, crtc->pipe);
4314
 
4315
	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
4316
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
4317
 
4318
	/* Border color in case we don't scale up to the full screen. Black by
4319
	 * default, change to something else for debugging. */
4320
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
4321
}
4322
 
5060 serge 4323
static enum intel_display_power_domain port_to_power_domain(enum port port)
4560 Serge 4324
{
5060 serge 4325
	switch (port) {
4326
	case PORT_A:
4327
		return POWER_DOMAIN_PORT_DDI_A_4_LANES;
4328
	case PORT_B:
4329
		return POWER_DOMAIN_PORT_DDI_B_4_LANES;
4330
	case PORT_C:
4331
		return POWER_DOMAIN_PORT_DDI_C_4_LANES;
4332
	case PORT_D:
4333
		return POWER_DOMAIN_PORT_DDI_D_4_LANES;
4334
	default:
4335
		WARN_ON_ONCE(1);
4336
		return POWER_DOMAIN_PORT_OTHER;
4337
	}
4338
}
4339
 
4340
#define for_each_power_domain(domain, mask)				\
4341
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
4342
		if ((1 << (domain)) & (mask))
4343
 
4344
enum intel_display_power_domain
4345
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
4346
{
4347
	struct drm_device *dev = intel_encoder->base.dev;
4348
	struct intel_digital_port *intel_dig_port;
4349
 
4350
	switch (intel_encoder->type) {
4351
	case INTEL_OUTPUT_UNKNOWN:
4352
		/* Only DDI platforms should ever use this output type */
4353
		WARN_ON_ONCE(!HAS_DDI(dev));
4354
	case INTEL_OUTPUT_DISPLAYPORT:
4355
	case INTEL_OUTPUT_HDMI:
4356
	case INTEL_OUTPUT_EDP:
4357
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
4358
		return port_to_power_domain(intel_dig_port->port);
4359
	case INTEL_OUTPUT_DP_MST:
4360
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
4361
		return port_to_power_domain(intel_dig_port->port);
4362
	case INTEL_OUTPUT_ANALOG:
4363
		return POWER_DOMAIN_PORT_CRT;
4364
	case INTEL_OUTPUT_DSI:
4365
		return POWER_DOMAIN_PORT_DSI;
4366
	default:
4367
		return POWER_DOMAIN_PORT_OTHER;
4368
	}
4369
}
4370
 
4371
static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
4372
{
4373
	struct drm_device *dev = crtc->dev;
4374
	struct intel_encoder *intel_encoder;
4375
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4376
	enum pipe pipe = intel_crtc->pipe;
4377
	unsigned long mask;
4378
	enum transcoder transcoder;
4379
 
4380
	transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
4381
 
4382
	mask = BIT(POWER_DOMAIN_PIPE(pipe));
4383
	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
4384
	if (intel_crtc->config.pch_pfit.enabled ||
4385
	    intel_crtc->config.pch_pfit.force_thru)
4386
		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
4387
 
4388
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
4389
		mask |= BIT(intel_display_port_power_domain(intel_encoder));
4390
 
4391
	return mask;
4392
}
4393
 
4394
void intel_display_set_init_power(struct drm_i915_private *dev_priv,
4395
				  bool enable)
4396
{
4397
	if (dev_priv->power_domains.init_power_on == enable)
4398
		return;
4399
 
4400
	if (enable)
4401
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
4402
	else
4403
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
4404
 
4405
	dev_priv->power_domains.init_power_on = enable;
4406
}
4407
 
4408
static void modeset_update_crtc_power_domains(struct drm_device *dev)
4409
{
4410
	struct drm_i915_private *dev_priv = dev->dev_private;
4411
	unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
4412
	struct intel_crtc *crtc;
4413
 
4414
	/*
4415
	 * First get all needed power domains, then put all unneeded, to avoid
4416
	 * any unnecessary toggling of the power wells.
4417
	 */
4418
	for_each_intel_crtc(dev, crtc) {
4419
		enum intel_display_power_domain domain;
4420
 
4421
		if (!crtc->base.enabled)
4422
			continue;
4423
 
4424
		pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);
4425
 
4426
		for_each_power_domain(domain, pipe_domains[crtc->pipe])
4427
			intel_display_power_get(dev_priv, domain);
4428
	}
4429
 
4430
	for_each_intel_crtc(dev, crtc) {
4431
		enum intel_display_power_domain domain;
4432
 
4433
		for_each_power_domain(domain, crtc->enabled_power_domains)
4434
			intel_display_power_put(dev_priv, domain);
4435
 
4436
		crtc->enabled_power_domains = pipe_domains[crtc->pipe];
4437
	}
4438
 
4439
	intel_display_set_init_power(dev_priv, false);
4440
}
4441
 
4442
/* returns HPLL frequency in kHz */
4443
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
4444
{
4560 Serge 4445
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
4446
 
4447
	/* Obtain SKU information */
4448
	mutex_lock(&dev_priv->dpio_lock);
4449
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
4450
		CCK_FUSE_HPLL_FREQ_MASK;
4451
	mutex_unlock(&dev_priv->dpio_lock);
4452
 
5060 serge 4453
	return vco_freq[hpll_freq] * 1000;
4560 Serge 4454
}
4455
 
5060 serge 4456
static void vlv_update_cdclk(struct drm_device *dev)
4457
{
4458
	struct drm_i915_private *dev_priv = dev->dev_private;
4459
 
4460
	dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
4461
	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz",
4462
			 dev_priv->vlv_cdclk_freq);
4463
 
4464
	/*
4465
	 * Program the gmbus_freq based on the cdclk frequency.
4466
	 * BSpec erroneously claims we should aim for 4MHz, but
4467
	 * in fact 1MHz is the correct frequency.
4468
	 */
4469
	I915_WRITE(GMBUSFREQ_VLV, dev_priv->vlv_cdclk_freq);
4470
}
4471
 
4560 Serge 4472
/* Adjust CDclk dividers to allow high res or save power if possible */
4473
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
4474
{
4475
	struct drm_i915_private *dev_priv = dev->dev_private;
4476
	u32 val, cmd;
4477
 
5060 serge 4478
	WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
4479
 
4480
	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
4560 Serge 4481
		cmd = 2;
5060 serge 4482
	else if (cdclk == 266667)
4560 Serge 4483
		cmd = 1;
4484
	else
4485
		cmd = 0;
4486
 
4487
	mutex_lock(&dev_priv->rps.hw_lock);
4488
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
4489
	val &= ~DSPFREQGUAR_MASK;
4490
	val |= (cmd << DSPFREQGUAR_SHIFT);
4491
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
4492
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
4493
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
4494
		     50)) {
4495
		DRM_ERROR("timed out waiting for CDclk change\n");
4496
	}
4497
	mutex_unlock(&dev_priv->rps.hw_lock);
4498
 
5060 serge 4499
	if (cdclk == 400000) {
4560 Serge 4500
		u32 divider, vco;
4501
 
4502
		vco = valleyview_get_vco(dev_priv);
5060 serge 4503
		divider = DIV_ROUND_CLOSEST(vco << 1, cdclk) - 1;
4560 Serge 4504
 
4505
		mutex_lock(&dev_priv->dpio_lock);
4506
		/* adjust cdclk divider */
4507
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
5060 serge 4508
		val &= ~DISPLAY_FREQUENCY_VALUES;
4560 Serge 4509
		val |= divider;
4510
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
5060 serge 4511
 
4512
		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
4513
			      DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
4514
			     50))
4515
			DRM_ERROR("timed out waiting for CDclk change\n");
4560 Serge 4516
		mutex_unlock(&dev_priv->dpio_lock);
4517
	}
4518
 
4519
	mutex_lock(&dev_priv->dpio_lock);
4520
	/* adjust self-refresh exit latency value */
4521
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
4522
	val &= ~0x7f;
4523
 
4524
	/*
4525
	 * For high bandwidth configs, we set a higher latency in the bunit
4526
	 * so that the core display fetch happens in time to avoid underruns.
4527
	 */
5060 serge 4528
	if (cdclk == 400000)
4560 Serge 4529
		val |= 4500 / 250; /* 4.5 usec */
4530
	else
4531
		val |= 3000 / 250; /* 3.0 usec */
4532
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
4533
	mutex_unlock(&dev_priv->dpio_lock);
4534
 
5060 serge 4535
	vlv_update_cdclk(dev);
4560 Serge 4536
}
4537
 
4538
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
4539
				 int max_pixclk)
4540
{
5060 serge 4541
	int vco = valleyview_get_vco(dev_priv);
4542
	int freq_320 = (vco <<  1) % 320000 != 0 ? 333333 : 320000;
4560 Serge 4543
 
4544
	/*
4545
	 * Really only a few cases to deal with, as only 4 CDclks are supported:
4546
	 *   200MHz
4547
	 *   267MHz
5060 serge 4548
	 *   320/333MHz (depends on HPLL freq)
4560 Serge 4549
	 *   400MHz
4550
	 * So we check to see whether we're above 90% of the lower bin and
4551
	 * adjust if needed.
5060 serge 4552
	 *
4553
	 * We seem to get an unstable or solid color picture at 200MHz.
4554
	 * Not sure what's wrong. For now use 200MHz only when all pipes
4555
	 * are off.
4560 Serge 4556
	 */
5060 serge 4557
	if (max_pixclk > freq_320*9/10)
4558
		return 400000;
4559
	else if (max_pixclk > 266667*9/10)
4560
		return freq_320;
4561
	else if (max_pixclk > 0)
4562
		return 266667;
4563
	else
4564
		return 200000;
4560 Serge 4565
}
4566
 
5060 serge 4567
/* compute the max pixel clock for new configuration */
4568
static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
4560 Serge 4569
{
4570
	struct drm_device *dev = dev_priv->dev;
4571
	struct intel_crtc *intel_crtc;
4572
	int max_pixclk = 0;
4573
 
5060 serge 4574
	for_each_intel_crtc(dev, intel_crtc) {
4575
		if (intel_crtc->new_enabled)
4560 Serge 4576
			max_pixclk = max(max_pixclk,
5060 serge 4577
					 intel_crtc->new_config->adjusted_mode.crtc_clock);
4560 Serge 4578
	}
4579
 
4580
	return max_pixclk;
4581
}
4582
 
4583
static void valleyview_modeset_global_pipes(struct drm_device *dev,
5060 serge 4584
					    unsigned *prepare_pipes)
4560 Serge 4585
{
4586
	struct drm_i915_private *dev_priv = dev->dev_private;
4587
	struct intel_crtc *intel_crtc;
5060 serge 4588
	int max_pixclk = intel_mode_max_pixclk(dev_priv);
4560 Serge 4589
 
5060 serge 4590
	if (valleyview_calc_cdclk(dev_priv, max_pixclk) ==
4591
	    dev_priv->vlv_cdclk_freq)
4560 Serge 4592
		return;
4593
 
5060 serge 4594
	/* disable/enable all currently active pipes while we change cdclk */
4595
	for_each_intel_crtc(dev, intel_crtc)
4560 Serge 4596
		if (intel_crtc->base.enabled)
4597
			*prepare_pipes |= (1 << intel_crtc->pipe);
4598
}
4599
 
4600
static void valleyview_modeset_global_resources(struct drm_device *dev)
4601
{
4602
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 4603
	int max_pixclk = intel_mode_max_pixclk(dev_priv);
4560 Serge 4604
	int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
4605
 
5060 serge 4606
	if (req_cdclk != dev_priv->vlv_cdclk_freq)
4560 Serge 4607
		valleyview_set_cdclk(dev, req_cdclk);
5060 serge 4608
	modeset_update_crtc_power_domains(dev);
4560 Serge 4609
}
4610
 
4104 Serge 4611
static void valleyview_crtc_enable(struct drm_crtc *crtc)
4612
{
4613
	struct drm_device *dev = crtc->dev;
4614
	struct drm_i915_private *dev_priv = dev->dev_private;
4615
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4616
	struct intel_encoder *encoder;
4617
	int pipe = intel_crtc->pipe;
4618
	int plane = intel_crtc->plane;
4560 Serge 4619
	bool is_dsi;
5060 serge 4620
	u32 dspcntr;
4104 Serge 4621
 
4622
	WARN_ON(!crtc->enabled);
4623
 
4624
	if (intel_crtc->active)
4625
		return;
4626
 
5060 serge 4627
	is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
4628
 
4629
	if (!is_dsi && !IS_CHERRYVIEW(dev))
4630
	vlv_prepare_pll(intel_crtc);
4631
 
4632
	/* Set up the display plane register */
4633
	dspcntr = DISPPLANE_GAMMA_ENABLE;
4634
 
4635
	if (intel_crtc->config.has_dp_encoder)
4636
		intel_dp_set_m_n(intel_crtc);
4637
 
4638
	intel_set_pipe_timings(intel_crtc);
4639
 
4640
	/* pipesrc and dspsize control the size that is scaled from,
4641
	 * which should always be the user's requested size.
4642
	 */
4643
	I915_WRITE(DSPSIZE(plane),
4644
		   ((intel_crtc->config.pipe_src_h - 1) << 16) |
4645
		   (intel_crtc->config.pipe_src_w - 1));
4646
	I915_WRITE(DSPPOS(plane), 0);
4647
 
4648
	i9xx_set_pipeconf(intel_crtc);
4649
 
4650
	I915_WRITE(DSPCNTR(plane), dspcntr);
4651
	POSTING_READ(DSPCNTR(plane));
4652
 
4653
	dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
4654
					       crtc->x, crtc->y);
4655
 
4104 Serge 4656
	intel_crtc->active = true;
4657
 
5060 serge 4658
	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4659
 
4104 Serge 4660
	for_each_encoder_on_crtc(dev, crtc, encoder)
4661
		if (encoder->pre_pll_enable)
4662
			encoder->pre_pll_enable(encoder);
4663
 
5060 serge 4664
	if (!is_dsi) {
4665
		if (IS_CHERRYVIEW(dev))
4666
			chv_enable_pll(intel_crtc);
4667
		else
4104 Serge 4668
	vlv_enable_pll(intel_crtc);
5060 serge 4669
	}
4104 Serge 4670
 
4671
	for_each_encoder_on_crtc(dev, crtc, encoder)
4672
		if (encoder->pre_enable)
4673
			encoder->pre_enable(encoder);
4674
 
4675
	i9xx_pfit_enable(intel_crtc);
4676
 
4677
	intel_crtc_load_lut(crtc);
4678
 
4560 Serge 4679
	intel_update_watermarks(crtc);
5060 serge 4680
	intel_enable_pipe(intel_crtc);
4104 Serge 4681
 
4682
	for_each_encoder_on_crtc(dev, crtc, encoder)
4683
		encoder->enable(encoder);
5060 serge 4684
 
4685
	intel_crtc_enable_planes(crtc);
4686
 
4687
	/* Underruns don't raise interrupts, so check manually. */
4688
	i9xx_check_fifo_underruns(dev);
4104 Serge 4689
}
4690
 
5060 serge 4691
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
4692
{
4693
	struct drm_device *dev = crtc->base.dev;
4694
	struct drm_i915_private *dev_priv = dev->dev_private;
4695
 
4696
	I915_WRITE(FP0(crtc->pipe), crtc->config.dpll_hw_state.fp0);
4697
	I915_WRITE(FP1(crtc->pipe), crtc->config.dpll_hw_state.fp1);
4698
}
4699
 
2327 Serge 4700
static void i9xx_crtc_enable(struct drm_crtc *crtc)
4701
{
4702
    struct drm_device *dev = crtc->dev;
4703
    struct drm_i915_private *dev_priv = dev->dev_private;
4704
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031 serge 4705
	struct intel_encoder *encoder;
2327 Serge 4706
    int pipe = intel_crtc->pipe;
4707
    int plane = intel_crtc->plane;
5060 serge 4708
	u32 dspcntr;
2327 Serge 4709
 
3031 serge 4710
	WARN_ON(!crtc->enabled);
4711
 
2327 Serge 4712
    if (intel_crtc->active)
4713
        return;
4714
 
5060 serge 4715
	i9xx_set_pll_dividers(intel_crtc);
4716
 
4717
	/* Set up the display plane register */
4718
	dspcntr = DISPPLANE_GAMMA_ENABLE;
4719
 
4720
	if (pipe == 0)
4721
		dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
4722
	else
4723
		dspcntr |= DISPPLANE_SEL_PIPE_B;
4724
 
4725
	if (intel_crtc->config.has_dp_encoder)
4726
		intel_dp_set_m_n(intel_crtc);
4727
 
4728
	intel_set_pipe_timings(intel_crtc);
4729
 
4730
	/* pipesrc and dspsize control the size that is scaled from,
4731
	 * which should always be the user's requested size.
4732
	 */
4733
	I915_WRITE(DSPSIZE(plane),
4734
		   ((intel_crtc->config.pipe_src_h - 1) << 16) |
4735
		   (intel_crtc->config.pipe_src_w - 1));
4736
	I915_WRITE(DSPPOS(plane), 0);
4737
 
4738
	i9xx_set_pipeconf(intel_crtc);
4739
 
4740
	I915_WRITE(DSPCNTR(plane), dspcntr);
4741
	POSTING_READ(DSPCNTR(plane));
4742
 
4743
	dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
4744
					       crtc->x, crtc->y);
4745
 
2327 Serge 4746
    intel_crtc->active = true;
4747
 
5060 serge 4748
	if (!IS_GEN2(dev))
4749
		intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4750
 
3480 Serge 4751
	for_each_encoder_on_crtc(dev, crtc, encoder)
4752
		if (encoder->pre_enable)
4753
			encoder->pre_enable(encoder);
4754
 
4104 Serge 4755
	i9xx_enable_pll(intel_crtc);
4756
 
4757
	i9xx_pfit_enable(intel_crtc);
4758
 
4759
	intel_crtc_load_lut(crtc);
4760
 
4560 Serge 4761
	intel_update_watermarks(crtc);
5060 serge 4762
	intel_enable_pipe(intel_crtc);
2327 Serge 4763
 
5060 serge 4764
	for_each_encoder_on_crtc(dev, crtc, encoder)
4765
		encoder->enable(encoder);
3031 serge 4766
 
5060 serge 4767
	intel_crtc_enable_planes(crtc);
4104 Serge 4768
 
5060 serge 4769
	/*
4770
	 * Gen2 reports pipe underruns whenever all planes are disabled.
4771
	 * So don't enable underrun reporting before at least some planes
4772
	 * are enabled.
4773
	 * FIXME: Need to fix the logic to work when we turn off all planes
4774
	 * but leave the pipe running.
4775
	 */
4776
	if (IS_GEN2(dev))
4777
	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4778
 
4779
	/* Underruns don't raise interrupts, so check manually. */
4780
	i9xx_check_fifo_underruns(dev);
2327 Serge 4781
}
4782
 
3746 Serge 4783
static void i9xx_pfit_disable(struct intel_crtc *crtc)
4784
{
4785
	struct drm_device *dev = crtc->base.dev;
4786
	struct drm_i915_private *dev_priv = dev->dev_private;
4787
 
4104 Serge 4788
	if (!crtc->config.gmch_pfit.control)
4789
		return;
4790
 
3746 Serge 4791
	assert_pipe_disabled(dev_priv, crtc->pipe);
4792
 
4104 Serge 4793
	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
4794
			 I915_READ(PFIT_CONTROL));
3746 Serge 4795
		I915_WRITE(PFIT_CONTROL, 0);
4796
}
4797
 
2327 Serge 4798
static void i9xx_crtc_disable(struct drm_crtc *crtc)
4799
{
4800
    struct drm_device *dev = crtc->dev;
4801
    struct drm_i915_private *dev_priv = dev->dev_private;
4802
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031 serge 4803
	struct intel_encoder *encoder;
2327 Serge 4804
    int pipe = intel_crtc->pipe;
4805
 
4806
    if (!intel_crtc->active)
4807
        return;
4808
 
5060 serge 4809
	/*
4810
	 * Gen2 reports pipe underruns whenever all planes are disabled.
4811
	 * So disable underrun reporting before all the planes get disabled.
4812
	 * FIXME: Need to fix the logic to work when we turn off all planes
4813
	 * but leave the pipe running.
4814
	 */
4815
	if (IS_GEN2(dev))
4816
		intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
4817
 
4818
	/*
4819
	 * Vblank time updates from the shadow to live plane control register
4820
	 * are blocked if the memory self-refresh mode is active at that
4821
	 * moment. So to make sure the plane gets truly disabled, disable
4822
	 * first the self-refresh mode. The self-refresh enable bit in turn
4823
	 * will be checked/applied by the HW only at the next frame start
4824
	 * event which is after the vblank start event, so we need to have a
4825
	 * wait-for-vblank between disabling the plane and the pipe.
4826
	 */
4827
	intel_set_memory_cxsr(dev_priv, false);
4828
	intel_crtc_disable_planes(crtc);
4829
 
3031 serge 4830
	for_each_encoder_on_crtc(dev, crtc, encoder)
4831
		encoder->disable(encoder);
4832
 
5060 serge 4833
	/*
4834
	 * On gen2 planes are double buffered but the pipe isn't, so we must
4835
	 * wait for planes to fully turn off before disabling the pipe.
4836
	 * We also need to wait on all gmch platforms because of the
4837
	 * self-refresh mode constraint explained above.
4838
	 */
4839
	intel_wait_for_vblank(dev, pipe);
2327 Serge 4840
 
4841
    intel_disable_pipe(dev_priv, pipe);
3480 Serge 4842
 
3746 Serge 4843
	i9xx_pfit_disable(intel_crtc);
3480 Serge 4844
 
4104 Serge 4845
	for_each_encoder_on_crtc(dev, crtc, encoder)
4846
		if (encoder->post_disable)
4847
			encoder->post_disable(encoder);
2327 Serge 4848
 
5060 serge 4849
	if (!intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) {
4850
		if (IS_CHERRYVIEW(dev))
4851
			chv_disable_pll(dev_priv, pipe);
4852
		else if (IS_VALLEYVIEW(dev))
4557 Serge 4853
			vlv_disable_pll(dev_priv, pipe);
5060 serge 4854
		else
4104 Serge 4855
			i9xx_disable_pll(dev_priv, pipe);
5060 serge 4856
	}
4104 Serge 4857
 
5060 serge 4858
	if (!IS_GEN2(dev))
4859
		intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
4860
 
2327 Serge 4861
    intel_crtc->active = false;
4560 Serge 4862
	intel_update_watermarks(crtc);
4863
 
5060 serge 4864
	mutex_lock(&dev->struct_mutex);
2327 Serge 4865
    intel_update_fbc(dev);
5060 serge 4866
	mutex_unlock(&dev->struct_mutex);
2327 Serge 4867
}
4868
 
3031 serge 4869
static void i9xx_crtc_off(struct drm_crtc *crtc)
2327 Serge 4870
{
4871
}
4872
 
3031 serge 4873
static void intel_crtc_update_sarea(struct drm_crtc *crtc,
4874
				    bool enabled)
2330 Serge 4875
{
4876
	struct drm_device *dev = crtc->dev;
4877
	struct drm_i915_master_private *master_priv;
4878
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4879
	int pipe = intel_crtc->pipe;
2327 Serge 4880
 
4881
 
2340 Serge 4882
#if 0
2330 Serge 4883
	if (!dev->primary->master)
4884
		return;
2327 Serge 4885
 
2330 Serge 4886
	master_priv = dev->primary->master->driver_priv;
4887
	if (!master_priv->sarea_priv)
4888
		return;
2327 Serge 4889
 
2330 Serge 4890
	switch (pipe) {
4891
	case 0:
4892
		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
4893
		master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
4894
		break;
4895
	case 1:
4896
		master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
4897
		master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
4898
		break;
4899
	default:
4900
		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
4901
		break;
4902
	}
2340 Serge 4903
#endif
5060 serge 4904
}
2340 Serge 4905
 
5060 serge 4906
/* Master function to enable/disable CRTC and corresponding power wells */
4907
void intel_crtc_control(struct drm_crtc *crtc, bool enable)
4908
{
4909
	struct drm_device *dev = crtc->dev;
4910
	struct drm_i915_private *dev_priv = dev->dev_private;
4911
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4912
	enum intel_display_power_domain domain;
4913
	unsigned long domains;
4914
 
4915
	if (enable) {
4916
		if (!intel_crtc->active) {
4917
			domains = get_crtc_power_domains(crtc);
4918
			for_each_power_domain(domain, domains)
4919
				intel_display_power_get(dev_priv, domain);
4920
			intel_crtc->enabled_power_domains = domains;
4921
 
4922
			dev_priv->display.crtc_enable(crtc);
4923
		}
4924
	} else {
4925
		if (intel_crtc->active) {
4926
			dev_priv->display.crtc_disable(crtc);
4927
 
4928
			domains = intel_crtc->enabled_power_domains;
4929
			for_each_power_domain(domain, domains)
4930
				intel_display_power_put(dev_priv, domain);
4931
			intel_crtc->enabled_power_domains = 0;
4932
		}
4933
	}
2330 Serge 4934
}
2327 Serge 4935
 
3031 serge 4936
/**
4937
 * Sets the power management mode of the pipe and plane.
4938
 */
4939
void intel_crtc_update_dpms(struct drm_crtc *crtc)
4940
{
4941
	struct drm_device *dev = crtc->dev;
4942
	struct intel_encoder *intel_encoder;
4943
	bool enable = false;
4944
 
4945
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
4946
		enable |= intel_encoder->connectors_active;
4947
 
5060 serge 4948
	intel_crtc_control(crtc, enable);
3031 serge 4949
 
4950
	intel_crtc_update_sarea(crtc, enable);
4951
}
4952
 
2330 Serge 4953
static void intel_crtc_disable(struct drm_crtc *crtc)
4954
{
4955
	struct drm_device *dev = crtc->dev;
3031 serge 4956
	struct drm_connector *connector;
4957
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 4958
	struct drm_i915_gem_object *old_obj = intel_fb_obj(crtc->primary->fb);
4959
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2327 Serge 4960
 
3031 serge 4961
	/* crtc should still be enabled when we disable it. */
4962
	WARN_ON(!crtc->enabled);
2327 Serge 4963
 
4104 Serge 4964
	dev_priv->display.crtc_disable(crtc);
3031 serge 4965
	intel_crtc_update_sarea(crtc, false);
4966
	dev_priv->display.off(crtc);
4967
 
5060 serge 4968
	if (crtc->primary->fb) {
4280 Serge 4969
		mutex_lock(&dev->struct_mutex);
5060 serge 4970
		intel_unpin_fb_obj(old_obj);
4971
		i915_gem_track_fb(old_obj, NULL,
4972
				  INTEL_FRONTBUFFER_PRIMARY(pipe));
4280 Serge 4973
		mutex_unlock(&dev->struct_mutex);
5060 serge 4974
		crtc->primary->fb = NULL;
4280 Serge 4975
	}
3031 serge 4976
 
4977
	/* Update computed state. */
4978
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
4979
		if (!connector->encoder || !connector->encoder->crtc)
4980
			continue;
4981
 
4982
		if (connector->encoder->crtc != crtc)
4983
			continue;
4984
 
4985
		connector->dpms = DRM_MODE_DPMS_OFF;
4986
		to_intel_encoder(connector->encoder)->connectors_active = false;
2330 Serge 4987
	}
4988
}
2327 Serge 4989
 
3031 serge 4990
void intel_encoder_destroy(struct drm_encoder *encoder)
2330 Serge 4991
{
3031 serge 4992
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
4993
 
4994
	drm_encoder_cleanup(encoder);
4995
	kfree(intel_encoder);
2330 Serge 4996
}
2327 Serge 4997
 
4104 Serge 4998
/* Simple dpms helper for encoders with just one connector, no cloning and only
3031 serge 4999
 * one kind of off state. It clamps all !ON modes to fully OFF and changes the
5000
 * state of the entire output pipe. */
4104 Serge 5001
static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
2330 Serge 5002
{
3031 serge 5003
	if (mode == DRM_MODE_DPMS_ON) {
5004
		encoder->connectors_active = true;
5005
 
5006
		intel_crtc_update_dpms(encoder->base.crtc);
5007
	} else {
5008
		encoder->connectors_active = false;
5009
 
5010
		intel_crtc_update_dpms(encoder->base.crtc);
5011
	}
2330 Serge 5012
}
2327 Serge 5013
 
3031 serge 5014
/* Cross check the actual hw state with our own modeset state tracking (and its
5015
 * internal consistency). */
5016
static void intel_connector_check_state(struct intel_connector *connector)
2330 Serge 5017
{
3031 serge 5018
	if (connector->get_hw_state(connector)) {
5019
		struct intel_encoder *encoder = connector->encoder;
5020
		struct drm_crtc *crtc;
5021
		bool encoder_enabled;
5022
		enum pipe pipe;
5023
 
5024
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5025
			      connector->base.base.id,
5060 serge 5026
			      connector->base.name);
3031 serge 5027
 
5060 serge 5028
		/* there is no real hw state for MST connectors */
5029
		if (connector->mst_port)
5030
			return;
5031
 
3031 serge 5032
		WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
5033
		     "wrong connector dpms state\n");
5034
		WARN(connector->base.encoder != &encoder->base,
5035
		     "active connector not linked to encoder\n");
5060 serge 5036
 
5037
		if (encoder) {
3031 serge 5038
			WARN(!encoder->connectors_active,
5039
			     "encoder->connectors_active not set\n");
5040
 
5041
			encoder_enabled = encoder->get_hw_state(encoder, &pipe);
5042
			WARN(!encoder_enabled, "encoder not enabled\n");
5043
			if (WARN_ON(!encoder->base.crtc))
5044
				return;
5045
 
5046
			crtc = encoder->base.crtc;
5047
 
5048
			WARN(!crtc->enabled, "crtc not enabled\n");
5049
			WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
5050
			WARN(pipe != to_intel_crtc(crtc)->pipe,
5051
			     "encoder active on the wrong pipe\n");
5052
		}
5060 serge 5053
	}
2330 Serge 5054
}
2327 Serge 5055
 
3031 serge 5056
/* Even simpler default implementation, if there's really no special case to
5057
 * consider. */
5058
void intel_connector_dpms(struct drm_connector *connector, int mode)
2330 Serge 5059
{
3031 serge 5060
	/* All the simple cases only support two dpms states. */
5061
	if (mode != DRM_MODE_DPMS_ON)
5062
		mode = DRM_MODE_DPMS_OFF;
2342 Serge 5063
 
3031 serge 5064
	if (mode == connector->dpms)
5065
		return;
5066
 
5067
	connector->dpms = mode;
5068
 
5069
	/* Only need to change hw state when actually enabled */
4104 Serge 5070
	if (connector->encoder)
5071
		intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
3031 serge 5072
 
5073
	intel_modeset_check_state(connector->dev);
2330 Serge 5074
}
2327 Serge 5075
 
3031 serge 5076
/* Simple connector->get_hw_state implementation for encoders that support only
5077
 * one connector and no cloning and hence the encoder state determines the state
5078
 * of the connector. */
5079
bool intel_connector_get_hw_state(struct intel_connector *connector)
2330 Serge 5080
{
3031 serge 5081
	enum pipe pipe = 0;
5082
	struct intel_encoder *encoder = connector->encoder;
2330 Serge 5083
 
3031 serge 5084
	return encoder->get_hw_state(encoder, &pipe);
2330 Serge 5085
}
5086
 
4104 Serge 5087
static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
5088
				     struct intel_crtc_config *pipe_config)
5089
{
5090
	struct drm_i915_private *dev_priv = dev->dev_private;
5091
	struct intel_crtc *pipe_B_crtc =
5092
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
5093
 
5094
	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
5095
		      pipe_name(pipe), pipe_config->fdi_lanes);
5096
	if (pipe_config->fdi_lanes > 4) {
5097
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
5098
			      pipe_name(pipe), pipe_config->fdi_lanes);
5099
		return false;
5100
	}
5101
 
4560 Serge 5102
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
4104 Serge 5103
		if (pipe_config->fdi_lanes > 2) {
5104
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
5105
				      pipe_config->fdi_lanes);
5106
			return false;
5107
		} else {
5108
			return true;
5109
		}
5110
	}
5111
 
5112
	if (INTEL_INFO(dev)->num_pipes == 2)
5113
		return true;
5114
 
5115
	/* Ivybridge 3 pipe is really complicated */
5116
	switch (pipe) {
5117
	case PIPE_A:
5118
		return true;
5119
	case PIPE_B:
5120
		if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
5121
		    pipe_config->fdi_lanes > 2) {
5122
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
5123
				      pipe_name(pipe), pipe_config->fdi_lanes);
5124
			return false;
5125
		}
5126
		return true;
5127
	case PIPE_C:
5128
		if (!pipe_has_enabled_pch(pipe_B_crtc) ||
5129
		    pipe_B_crtc->config.fdi_lanes <= 2) {
5130
			if (pipe_config->fdi_lanes > 2) {
5131
				DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
5132
					      pipe_name(pipe), pipe_config->fdi_lanes);
5133
				return false;
5134
			}
5135
		} else {
5136
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
5137
			return false;
5138
		}
5139
		return true;
5140
	default:
5141
		BUG();
5142
	}
5143
}
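
/*
 * A compact restatement of the checks above (derived from the code, for
 * readability; the limits are exactly those tested above):
 *
 *	any pipe:    fdi_lanes <= 4
 *	HSW/BDW:     fdi_lanes <= 2
 *	IVB pipe A:  no extra constraint
 *	IVB pipe B:  more than 2 lanes only while pipe C is disabled
 *	IVB pipe C:  at most 2 lanes, and only if pipe B's PCH link uses <= 2
 */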
5144
 
5145
#define RETRY 1
5146
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
3746 Serge 5147
				      struct intel_crtc_config *pipe_config)
2330 Serge 5148
{
4104 Serge 5149
	struct drm_device *dev = intel_crtc->base.dev;
3746 Serge 5150
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
4104 Serge 5151
	int lane, link_bw, fdi_dotclock;
5152
	bool setup_ok, needs_recompute = false;
2330 Serge 5153
 
4104 Serge 5154
retry:
5155
	/* FDI is a binary signal running at ~2.7GHz, encoding
5156
	 * each output octet as 10 bits. The actual frequency
5157
	 * is stored as a divider into a 100MHz clock, and the
5158
	 * mode pixel clock is stored in units of 1KHz.
5159
	 * Hence the bw of each lane in terms of the mode signal
5160
	 * is:
5161
	 */
5162
	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
5163
 
4560 Serge 5164
	fdi_dotclock = adjusted_mode->crtc_clock;
4104 Serge 5165
 
5166
	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
5167
					   pipe_config->pipe_bpp);
5168
 
5169
	pipe_config->fdi_lanes = lane;
5170
 
5171
	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
5172
			       link_bw, &pipe_config->fdi_m_n);
5173
 
5174
	setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
5175
					    intel_crtc->pipe, pipe_config);
5176
	if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
5177
		pipe_config->pipe_bpp -= 2*3;
5178
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
5179
			      pipe_config->pipe_bpp);
5180
		needs_recompute = true;
5181
		pipe_config->bw_constrained = true;
5182
 
5183
		goto retry;
5184
	}
5185
 
5186
	if (needs_recompute)
5187
		return RETRY;
5188
 
5189
	return setup_ok ? 0 : -EINVAL;
5190
}
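
/*
 * Worked example for the FDI bandwidth math above (illustrative numbers,
 * not taken from the original source): if intel_fdi_link_freq() reports
 * 27, the link runs at 2.7 GHz per lane and, with 10 bits per octet on
 * the wire, link_bw = 27 * MHz(100) / KHz(1) / 10 = 270000 in mode-clock
 * units, i.e. 270000 * 8 = 2,160,000 kbit/s of payload per lane.  A
 * hypothetical 154000 kHz dot clock at 24 bpp needs 154000 * 24 =
 * 3,696,000 kbit/s, so at least two FDI lanes; if the lane check still
 * fails, the retry loop above drops pipe_bpp by 6 (2*3) and recomputes.
 */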
5191
 
5192
static void hsw_compute_ips_config(struct intel_crtc *crtc,
5193
				   struct intel_crtc_config *pipe_config)
5194
{
5060 serge 5195
	pipe_config->ips_enabled = i915.enable_ips &&
4104 Serge 5196
				   hsw_crtc_supports_ips(crtc) &&
5197
				   pipe_config->pipe_bpp <= 24;
5198
}
5199
 
5200
static int intel_crtc_compute_config(struct intel_crtc *crtc,
5201
				     struct intel_crtc_config *pipe_config)
5202
{
5203
	struct drm_device *dev = crtc->base.dev;
5204
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
5205
 
4560 Serge 5206
	/* FIXME should check pixel clock limits on all platforms */
5207
	if (INTEL_INFO(dev)->gen < 4) {
5208
		struct drm_i915_private *dev_priv = dev->dev_private;
5209
		int clock_limit =
5210
			dev_priv->display.get_display_clock_speed(dev);
5211
 
5212
		/*
5213
		 * Enable pixel doubling when the dot clock
5214
		 * is > 90% of the (display) core speed.
5215
		 *
5216
		 * GDG double wide on either pipe,
5217
		 * otherwise pipe A only.
5218
		 */
5219
		if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
5220
		    adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
5221
			clock_limit *= 2;
5222
			pipe_config->double_wide = true;
5223
		}
5224
 
5225
		if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
4104 Serge 5226
			return -EINVAL;
2330 Serge 5227
	}
5228
 
4560 Serge 5229
	/*
5230
	 * Pipe horizontal size must be even in:
5231
	 * - DVO ganged mode
5232
	 * - LVDS dual channel mode
5233
	 * - Double wide pipe
5234
	 */
5235
	if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
5236
	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
5237
		pipe_config->pipe_src_w &= ~1;
5238
 
4104 Serge 5239
	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
5240
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
3031 serge 5241
	 */
5242
	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
5243
		adjusted_mode->hsync_start == adjusted_mode->hdisplay)
4104 Serge 5244
		return -EINVAL;
3031 serge 5245
 
3746 Serge 5246
	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
5247
		pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
5248
	} else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
5249
		/* only a 8bpc pipe, with 6bpc dither through the panel fitter
5250
		 * for lvds. */
5251
		pipe_config->pipe_bpp = 8*3;
5252
	}
5253
 
4104 Serge 5254
	if (HAS_IPS(dev))
5255
		hsw_compute_ips_config(crtc, pipe_config);
5256
 
5060 serge 5257
	/*
5258
	 * XXX: PCH/WRPLL clock sharing is done in ->mode_set, so make sure the
5259
	 * old clock survives for now.
5260
	 */
5261
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev) || HAS_DDI(dev))
4104 Serge 5262
		pipe_config->shared_dpll = crtc->config.shared_dpll;
5263
 
5264
	if (pipe_config->has_pch_encoder)
5265
		return ironlake_fdi_compute_config(crtc, pipe_config);
5266
 
5267
	return 0;
2330 Serge 5268
}
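
/*
 * Worked example for the gen < 4 clock-limit check above (illustrative
 * numbers): with a display core clock of, say, 333000 kHz the threshold
 * is 333000 * 9 / 10 = 299700 kHz.  A 320000 kHz dot clock exceeds it,
 * so double-wide mode is selected and the limit doubles to 666000 kHz;
 * only if the dot clock still exceeds 90% of the doubled limit does the
 * function return -EINVAL.
 */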
5269
 
3031 serge 5270
static int valleyview_get_display_clock_speed(struct drm_device *dev)
5271
{
5060 serge 5272
	struct drm_i915_private *dev_priv = dev->dev_private;
5273
	int vco = valleyview_get_vco(dev_priv);
5274
	u32 val;
5275
	int divider;
5276
 
5277
	mutex_lock(&dev_priv->dpio_lock);
5278
	val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
5279
	mutex_unlock(&dev_priv->dpio_lock);
5280
 
5281
	divider = val & DISPLAY_FREQUENCY_VALUES;
5282
 
5283
	WARN((val & DISPLAY_FREQUENCY_STATUS) !=
5284
	     (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
5285
	     "cdclk change in progress\n");
5286
 
5287
	return DIV_ROUND_CLOSEST(vco << 1, divider + 1);
3031 serge 5288
}
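
/*
 * Illustrative numbers for the divider math above (the VCO value is an
 * assumption, not taken from the source): if valleyview_get_vco() returned
 * 1600000 kHz and the CCK register reported a divider field of 7, then
 * DIV_ROUND_CLOSEST(1600000 << 1, 7 + 1) = 400000 kHz, i.e. a 400 MHz
 * display clock.  The WARN only fires while a cdclk change is still in
 * flight and the status field has not yet caught up with the divider.
 */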
5289
 
2327 Serge 5290
static int i945_get_display_clock_speed(struct drm_device *dev)
5291
{
5292
	return 400000;
5293
}
5294
 
5295
static int i915_get_display_clock_speed(struct drm_device *dev)
5296
{
5297
	return 333000;
5298
}
5299
 
5300
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
5301
{
5302
	return 200000;
5303
}
5304
 
4104 Serge 5305
static int pnv_get_display_clock_speed(struct drm_device *dev)
5306
{
5307
	u16 gcfgc = 0;
5308
 
5309
	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
5310
 
5311
	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
5312
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
5313
		return 267000;
5314
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
5315
		return 333000;
5316
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
5317
		return 444000;
5318
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
5319
		return 200000;
5320
	default:
5321
		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
5322
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
5323
		return 133000;
5324
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
5325
		return 167000;
5326
	}
5327
}
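
/*
 * Note: the default (DRM_ERROR) case above intentionally falls through to
 * the GC_DISPLAY_CLOCK_133_MHZ_PNV branch, so an unrecognised GCFGC value
 * is reported and then treated as a 133 MHz display clock.
 */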
5328
 
2327 Serge 5329
static int i915gm_get_display_clock_speed(struct drm_device *dev)
5330
{
5331
	u16 gcfgc = 0;
5332
 
5333
	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
5334
 
5335
	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
5336
		return 133000;
5337
	else {
5338
		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
5339
		case GC_DISPLAY_CLOCK_333_MHZ:
5340
			return 333000;
5341
		default:
5342
		case GC_DISPLAY_CLOCK_190_200_MHZ:
5343
			return 190000;
5344
		}
5345
	}
5346
}
5347
 
5348
static int i865_get_display_clock_speed(struct drm_device *dev)
5349
{
5350
	return 266000;
5351
}
5352
 
5353
static int i855_get_display_clock_speed(struct drm_device *dev)
5354
{
5355
	u16 hpllcc = 0;
5356
	/* Assume that the hardware is in the high speed state.  This
5357
	 * should be the default.
5358
	 */
5359
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
5360
	case GC_CLOCK_133_200:
5361
	case GC_CLOCK_100_200:
5362
		return 200000;
5363
	case GC_CLOCK_166_250:
5364
		return 250000;
5365
	case GC_CLOCK_100_133:
5366
		return 133000;
5367
	}
5368
 
5369
	/* Shouldn't happen */
5370
	return 0;
5371
}
5372
 
5373
static int i830_get_display_clock_speed(struct drm_device *dev)
5374
{
5375
	return 133000;
5376
}
5377
 
5378
static void
3746 Serge 5379
intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
2327 Serge 5380
{
3746 Serge 5381
	while (*num > DATA_LINK_M_N_MASK ||
5382
	       *den > DATA_LINK_M_N_MASK) {
2327 Serge 5383
		*num >>= 1;
5384
		*den >>= 1;
5385
	}
5386
}
5387
 
3746 Serge 5388
static void compute_m_n(unsigned int m, unsigned int n,
5389
			uint32_t *ret_m, uint32_t *ret_n)
5390
{
5391
	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
5392
	*ret_m = div_u64((uint64_t) m * *ret_n, n);
5393
	intel_reduce_m_n_ratio(ret_m, ret_n);
5394
}
5395
 
3480 Serge 5396
void
5397
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
5398
		       int pixel_clock, int link_clock,
5399
		       struct intel_link_m_n *m_n)
2327 Serge 5400
{
3480 Serge 5401
	m_n->tu = 64;
3746 Serge 5402
 
5403
	compute_m_n(bits_per_pixel * pixel_clock,
5404
		    link_clock * nlanes * 8,
5405
		    &m_n->gmch_m, &m_n->gmch_n);
5406
 
5407
	compute_m_n(pixel_clock, link_clock,
5408
		    &m_n->link_m, &m_n->link_n);
2327 Serge 5409
}
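
/*
 * A minimal usage sketch of the M/N computation above (not part of the
 * original driver; the link parameters are illustrative assumptions).
 * With 24 bpp, a 148500 kHz pixel clock and four 270000 kHz lanes the
 * data ratio is 3,564,000 : 8,640,000 = 0.4125, so N is clamped to
 * DATA_LINK_N_MAX (0x800000) and M comes out near 0.4125 * 0x800000.
 */
#if 0
static void example_link_m_n(void)
{
	struct intel_link_m_n m_n;

	intel_link_compute_m_n(24, 4, 148500, 270000, &m_n);
	/* expect: m_n.tu == 64, m_n.gmch_n == 0x800000, m_n.gmch_m ~= 3460300 */
}
#endif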
5410
 
5411
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
5412
{
5060 serge 5413
	if (i915.panel_use_ssc >= 0)
5414
		return i915.panel_use_ssc != 0;
4104 Serge 5415
	return dev_priv->vbt.lvds_use_ssc
2327 Serge 5416
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
5417
}
5418
 
3031 serge 5419
static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
5420
{
5421
	struct drm_device *dev = crtc->dev;
5422
	struct drm_i915_private *dev_priv = dev->dev_private;
5423
	int refclk;
2327 Serge 5424
 
3031 serge 5425
	if (IS_VALLEYVIEW(dev)) {
4560 Serge 5426
		refclk = 100000;
3031 serge 5427
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
5428
	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4560 Serge 5429
		refclk = dev_priv->vbt.lvds_ssc_freq;
5430
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
3031 serge 5431
	} else if (!IS_GEN2(dev)) {
5432
		refclk = 96000;
5433
	} else {
5434
		refclk = 48000;
5435
	}
2327 Serge 5436
 
3031 serge 5437
	return refclk;
5438
}
2327 Serge 5439
 
4104 Serge 5440
static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
3031 serge 5441
{
4104 Serge 5442
	return (1 << dpll->n) << 16 | dpll->m2;
5443
}
3746 Serge 5444
 
4104 Serge 5445
static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
5446
{
5447
	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
3031 serge 5448
}
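
/*
 * A minimal sketch of the two FP encodings above (not part of the original
 * driver; the divider values n = 2, m1 = 10, m2 = 8 are illustrative).
 */
#if 0
static void example_dpll_fp(void)
{
	struct dpll ex = { .n = 2, .m1 = 10, .m2 = 8 };

	/* i9xx packs the raw dividers: 2 << 16 | 10 << 8 | 8 */
	WARN_ON(i9xx_dpll_compute_fp(&ex) != 0x00020a08);
	/* Pineview stores a one-hot N field and no M1: (1 << 2) << 16 | 8 */
	WARN_ON(pnv_dpll_compute_fp(&ex) != 0x00040008);
}
#endif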
2327 Serge 5449
 
3746 Serge 5450
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
3031 serge 5451
				     intel_clock_t *reduced_clock)
5452
{
3746 Serge 5453
	struct drm_device *dev = crtc->base.dev;
3031 serge 5454
	u32 fp, fp2 = 0;
2327 Serge 5455
 
3031 serge 5456
	if (IS_PINEVIEW(dev)) {
4104 Serge 5457
		fp = pnv_dpll_compute_fp(&crtc->config.dpll);
3031 serge 5458
		if (reduced_clock)
4104 Serge 5459
			fp2 = pnv_dpll_compute_fp(reduced_clock);
3031 serge 5460
	} else {
4104 Serge 5461
		fp = i9xx_dpll_compute_fp(&crtc->config.dpll);
3031 serge 5462
		if (reduced_clock)
4104 Serge 5463
			fp2 = i9xx_dpll_compute_fp(reduced_clock);
3031 serge 5464
	}
2327 Serge 5465
 
4104 Serge 5466
	crtc->config.dpll_hw_state.fp0 = fp;
2327 Serge 5467
 
3746 Serge 5468
	crtc->lowfreq_avail = false;
5469
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
5060 serge 5470
	    reduced_clock && i915.powersave) {
4104 Serge 5471
		crtc->config.dpll_hw_state.fp1 = fp2;
3746 Serge 5472
		crtc->lowfreq_avail = true;
3031 serge 5473
	} else {
4104 Serge 5474
		crtc->config.dpll_hw_state.fp1 = fp;
3031 serge 5475
	}
5476
}
2327 Serge 5477
 
4560 Serge 5478
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
5479
		pipe)
4104 Serge 5480
{
5481
	u32 reg_val;
5482
 
5483
	/*
5484
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
5485
	 * and set it to a reasonable value instead.
5486
	 */
4560 Serge 5487
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
4104 Serge 5488
	reg_val &= 0xffffff00;
5489
	reg_val |= 0x00000030;
4560 Serge 5490
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
4104 Serge 5491
 
4560 Serge 5492
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
4104 Serge 5493
	reg_val &= 0x8cffffff;
5494
	reg_val = 0x8c000000;
4560 Serge 5495
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
4104 Serge 5496
 
4560 Serge 5497
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
4104 Serge 5498
	reg_val &= 0xffffff00;
4560 Serge 5499
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
4104 Serge 5500
 
4560 Serge 5501
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
4104 Serge 5502
	reg_val &= 0x00ffffff;
5503
	reg_val |= 0xb0000000;
4560 Serge 5504
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
4104 Serge 5505
}
5506
 
5507
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
5508
					 struct intel_link_m_n *m_n)
5509
{
5510
	struct drm_device *dev = crtc->base.dev;
5511
	struct drm_i915_private *dev_priv = dev->dev_private;
5512
	int pipe = crtc->pipe;
5513
 
5514
	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
5515
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
5516
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
5517
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
5518
}
5519
 
5520
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
5521
					 struct intel_link_m_n *m_n)
5522
{
5523
	struct drm_device *dev = crtc->base.dev;
5524
	struct drm_i915_private *dev_priv = dev->dev_private;
5525
	int pipe = crtc->pipe;
5526
	enum transcoder transcoder = crtc->config.cpu_transcoder;
5527
 
5528
	if (INTEL_INFO(dev)->gen >= 5) {
5529
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
5530
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
5531
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
5532
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
5533
	} else {
5534
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
5535
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
5536
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
5537
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
5538
	}
5539
}
5540
 
3746 Serge 5541
static void intel_dp_set_m_n(struct intel_crtc *crtc)
3031 serge 5542
{
3746 Serge 5543
	if (crtc->config.has_pch_encoder)
5544
		intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
5545
	else
5546
		intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
5547
}
5548
 
5549
static void vlv_update_pll(struct intel_crtc *crtc)
5550
{
5060 serge 5551
	u32 dpll, dpll_md;
5552
 
5553
	/*
5554
	 * Enable DPIO clock input. We should never disable the reference
5555
	 * clock for pipe B, since VGA hotplug / manual detection depends
5556
	 * on it.
5557
	 */
5558
	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
5559
		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
5560
	/* We should never disable this, set it here for state tracking */
5561
	if (crtc->pipe == PIPE_B)
5562
		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
5563
	dpll |= DPLL_VCO_ENABLE;
5564
	crtc->config.dpll_hw_state.dpll = dpll;
5565
 
5566
	dpll_md = (crtc->config.pixel_multiplier - 1)
5567
		<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
5568
	crtc->config.dpll_hw_state.dpll_md = dpll_md;
5569
}
5570
 
5571
static void vlv_prepare_pll(struct intel_crtc *crtc)
5572
{
3746 Serge 5573
	struct drm_device *dev = crtc->base.dev;
3031 serge 5574
	struct drm_i915_private *dev_priv = dev->dev_private;
3746 Serge 5575
	int pipe = crtc->pipe;
5060 serge 5576
	u32 mdiv;
3031 serge 5577
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
5060 serge 5578
	u32 coreclk, reg_val;
2327 Serge 5579
 
3480 Serge 5580
	mutex_lock(&dev_priv->dpio_lock);
5581
 
3746 Serge 5582
	bestn = crtc->config.dpll.n;
5583
	bestm1 = crtc->config.dpll.m1;
5584
	bestm2 = crtc->config.dpll.m2;
5585
	bestp1 = crtc->config.dpll.p1;
5586
	bestp2 = crtc->config.dpll.p2;
3031 serge 5587
 
4104 Serge 5588
	/* See eDP HDMI DPIO driver vbios notes doc */
5589
 
5590
	/* PLL B needs special handling */
5060 serge 5591
	if (pipe == PIPE_B)
4560 Serge 5592
		vlv_pllb_recal_opamp(dev_priv, pipe);
4104 Serge 5593
 
5594
	/* Set up Tx target for periodic Rcomp update */
4560 Serge 5595
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
4104 Serge 5596
 
5597
	/* Disable target IRef on PLL */
4560 Serge 5598
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
4104 Serge 5599
	reg_val &= 0x00ffffff;
4560 Serge 5600
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
4104 Serge 5601
 
5602
	/* Disable fast lock */
4560 Serge 5603
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
4104 Serge 5604
 
5605
	/* Set idtafcrecal before PLL is enabled */
3031 serge 5606
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
5607
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
5608
	mdiv |= ((bestn << DPIO_N_SHIFT));
5609
	mdiv |= (1 << DPIO_K_SHIFT);
4104 Serge 5610
 
5611
	/*
5612
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
5613
	 * but we don't support that).
5614
	 * Note: don't use the DAC post divider as it seems unstable.
5615
	 */
5616
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
4560 Serge 5617
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
4104 Serge 5618
 
3031 serge 5619
	mdiv |= DPIO_ENABLE_CALIBRATION;
4560 Serge 5620
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
3031 serge 5621
 
4104 Serge 5622
	/* Set HBR and RBR LPF coefficients */
5623
	if (crtc->config.port_clock == 162000 ||
5624
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
5625
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
4560 Serge 5626
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
4104 Serge 5627
				 0x009f0003);
5628
	else
4560 Serge 5629
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
4104 Serge 5630
				 0x00d0000f);
3031 serge 5631
 
4104 Serge 5632
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
5633
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
5634
		/* Use SSC source */
5060 serge 5635
		if (pipe == PIPE_A)
4560 Serge 5636
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
4104 Serge 5637
					 0x0df40000);
5638
		else
4560 Serge 5639
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
4104 Serge 5640
					 0x0df70000);
5641
	} else { /* HDMI or VGA */
5642
		/* Use bend source */
5060 serge 5643
		if (pipe == PIPE_A)
4560 Serge 5644
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
4104 Serge 5645
					 0x0df70000);
5646
		else
4560 Serge 5647
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
4104 Serge 5648
					 0x0df40000);
5649
	}
3031 serge 5650
 
4560 Serge 5651
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
4104 Serge 5652
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
5653
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
5654
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
5655
		coreclk |= 0x01000000;
4560 Serge 5656
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
3031 serge 5657
 
4560 Serge 5658
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
5060 serge 5659
	mutex_unlock(&dev_priv->dpio_lock);
5660
}
4104 Serge 5661
 
5060 serge 5662
static void chv_update_pll(struct intel_crtc *crtc)
5663
{
5664
	struct drm_device *dev = crtc->base.dev;
5665
	struct drm_i915_private *dev_priv = dev->dev_private;
5666
	int pipe = crtc->pipe;
5667
	int dpll_reg = DPLL(crtc->pipe);
5668
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
5669
	u32 loopfilter, intcoeff;
5670
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
5671
	int refclk;
5672
 
5673
	crtc->config.dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
5674
		DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
5675
		DPLL_VCO_ENABLE;
5676
	if (pipe != PIPE_A)
5677
		crtc->config.dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
5678
 
5679
	crtc->config.dpll_hw_state.dpll_md =
5680
		(crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5681
 
5682
	bestn = crtc->config.dpll.n;
5683
	bestm2_frac = crtc->config.dpll.m2 & 0x3fffff;
5684
	bestm1 = crtc->config.dpll.m1;
5685
	bestm2 = crtc->config.dpll.m2 >> 22;
5686
	bestp1 = crtc->config.dpll.p1;
5687
	bestp2 = crtc->config.dpll.p2;
5688
 
4560 Serge 5689
	/*
5060 serge 5690
	 * Enable Refclk and SSC
4560 Serge 5691
	 */
5060 serge 5692
	I915_WRITE(dpll_reg,
5693
		   crtc->config.dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
3031 serge 5694
 
5060 serge 5695
	mutex_lock(&dev_priv->dpio_lock);
3031 serge 5696
 
5060 serge 5697
	/* p1 and p2 divider */
5698
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
5699
			5 << DPIO_CHV_S1_DIV_SHIFT |
5700
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
5701
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
5702
			1 << DPIO_CHV_K_DIV_SHIFT);
3243 Serge 5703
 
5060 serge 5704
	/* Feedback post-divider - m2 */
5705
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
5706
 
5707
	/* Feedback refclk divider - n and m1 */
5708
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
5709
			DPIO_CHV_M1_DIV_BY_2 |
5710
			1 << DPIO_CHV_N_DIV_SHIFT);
5711
 
5712
	/* M2 fraction division */
5713
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
5714
 
5715
	/* M2 fraction division enable */
5716
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port),
5717
		       DPIO_CHV_FRAC_DIV_EN |
5718
		       (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT));
5719
 
5720
	/* Loop filter */
5721
	refclk = i9xx_get_refclk(&crtc->base, 0);
5722
	loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT |
5723
		2 << DPIO_CHV_GAIN_CTRL_SHIFT;
5724
	if (refclk == 100000)
5725
		intcoeff = 11;
5726
	else if (refclk == 38400)
5727
		intcoeff = 10;
5728
	else
5729
		intcoeff = 9;
5730
	loopfilter |= intcoeff << DPIO_CHV_INT_COEFF_SHIFT;
5731
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
5732
 
5733
	/* AFC Recal */
5734
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
5735
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
5736
			DPIO_AFC_RECAL);
5737
 
3480 Serge 5738
	mutex_unlock(&dev_priv->dpio_lock);
3031 serge 5739
}
5740
 
3746 Serge 5741
static void i9xx_update_pll(struct intel_crtc *crtc,
5742
			    intel_clock_t *reduced_clock,
3031 serge 5743
			    int num_connectors)
5744
{
3746 Serge 5745
	struct drm_device *dev = crtc->base.dev;
3031 serge 5746
	struct drm_i915_private *dev_priv = dev->dev_private;
5747
	u32 dpll;
5748
	bool is_sdvo;
3746 Serge 5749
	struct dpll *clock = &crtc->config.dpll;
3031 serge 5750
 
3746 Serge 5751
	i9xx_update_pll_dividers(crtc, reduced_clock);
3243 Serge 5752
 
3746 Serge 5753
	is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
5754
		intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
3031 serge 5755
 
5756
	dpll = DPLL_VGA_MODE_DIS;
5757
 
3746 Serge 5758
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS))
3031 serge 5759
		dpll |= DPLLB_MODE_LVDS;
5760
	else
5761
		dpll |= DPLLB_MODE_DAC_SERIAL;
3746 Serge 5762
 
4104 Serge 5763
	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
3746 Serge 5764
		dpll |= (crtc->config.pixel_multiplier - 1)
5765
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
2342 Serge 5766
	}
4104 Serge 5767
 
5768
	if (is_sdvo)
5769
		dpll |= DPLL_SDVO_HIGH_SPEED;
5770
 
3746 Serge 5771
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
4104 Serge 5772
		dpll |= DPLL_SDVO_HIGH_SPEED;
2342 Serge 5773
 
3031 serge 5774
	/* compute bitmask from p1 value */
5775
	if (IS_PINEVIEW(dev))
5776
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
5777
	else {
5778
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5779
		if (IS_G4X(dev) && reduced_clock)
5780
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5781
	}
5782
	switch (clock->p2) {
5783
	case 5:
5784
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5785
		break;
5786
	case 7:
5787
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5788
		break;
5789
	case 10:
5790
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5791
		break;
5792
	case 14:
5793
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5794
		break;
5795
	}
5796
	if (INTEL_INFO(dev)->gen >= 4)
5797
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
2327 Serge 5798
 
4104 Serge 5799
	if (crtc->config.sdvo_tv_clock)
3031 serge 5800
		dpll |= PLL_REF_INPUT_TVCLKINBC;
3746 Serge 5801
	else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
3031 serge 5802
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5803
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5804
	else
5805
		dpll |= PLL_REF_INPUT_DREFCLK;
2327 Serge 5806
 
3031 serge 5807
	dpll |= DPLL_VCO_ENABLE;
4104 Serge 5808
	crtc->config.dpll_hw_state.dpll = dpll;
2327 Serge 5809
 
4104 Serge 5810
	if (INTEL_INFO(dev)->gen >= 4) {
5811
		u32 dpll_md = (crtc->config.pixel_multiplier - 1)
5812
					<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
5813
		crtc->config.dpll_hw_state.dpll_md = dpll_md;
5814
	}
3031 serge 5815
}
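
/*
 * Note on the "compute bitmask from p1 value" step above: the P1 post
 * divider field is one-hot, so (illustrative values) p1 = 2 is encoded as
 * (1 << 1) = 0x02 shifted by DPLL_FPA01_P1_POST_DIV_SHIFT, and p1 = 8 as
 * (1 << 7) = 0x80; Pineview uses the same scheme at its own shift.
 */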
2327 Serge 5816
 
3746 Serge 5817
static void i8xx_update_pll(struct intel_crtc *crtc,
5818
			    intel_clock_t *reduced_clock,
3031 serge 5819
			    int num_connectors)
5820
{
3746 Serge 5821
	struct drm_device *dev = crtc->base.dev;
3031 serge 5822
	struct drm_i915_private *dev_priv = dev->dev_private;
5823
	u32 dpll;
3746 Serge 5824
	struct dpll *clock = &crtc->config.dpll;
2327 Serge 5825
 
3746 Serge 5826
	i9xx_update_pll_dividers(crtc, reduced_clock);
3243 Serge 5827
 
3031 serge 5828
	dpll = DPLL_VGA_MODE_DIS;
2327 Serge 5829
 
3746 Serge 5830
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) {
3031 serge 5831
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5832
	} else {
5833
		if (clock->p1 == 2)
5834
			dpll |= PLL_P1_DIVIDE_BY_TWO;
5835
		else
5836
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5837
		if (clock->p2 == 4)
5838
			dpll |= PLL_P2_DIVIDE_BY_4;
5839
	}
2327 Serge 5840
 
4104 Serge 5841
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
5842
		dpll |= DPLL_DVO_2X_MODE;
5843
 
3746 Serge 5844
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
3031 serge 5845
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5846
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5847
	else
5848
		dpll |= PLL_REF_INPUT_DREFCLK;
5849
 
5850
	dpll |= DPLL_VCO_ENABLE;
4104 Serge 5851
	crtc->config.dpll_hw_state.dpll = dpll;
3031 serge 5852
}
5853
 
4104 Serge 5854
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
3243 Serge 5855
{
5856
	struct drm_device *dev = intel_crtc->base.dev;
5857
	struct drm_i915_private *dev_priv = dev->dev_private;
5858
	enum pipe pipe = intel_crtc->pipe;
3746 Serge 5859
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
4104 Serge 5860
	struct drm_display_mode *adjusted_mode =
5861
		&intel_crtc->config.adjusted_mode;
5060 serge 5862
	uint32_t crtc_vtotal, crtc_vblank_end;
5863
	int vsyncshift = 0;
3243 Serge 5864
 
4104 Serge 5865
	/* We need to be careful not to change the adjusted mode, for otherwise
5866
	 * the hw state checker will get angry at the mismatch. */
5867
	crtc_vtotal = adjusted_mode->crtc_vtotal;
5868
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;
5869
 
5060 serge 5870
	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
3243 Serge 5871
		/* the chip adds 2 halflines automatically */
4104 Serge 5872
		crtc_vtotal -= 1;
5873
		crtc_vblank_end -= 1;
5060 serge 5874
 
5875
		if (intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
5876
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
5877
		else
5878
			vsyncshift = adjusted_mode->crtc_hsync_start -
5879
				adjusted_mode->crtc_htotal / 2;
5880
		if (vsyncshift < 0)
5881
			vsyncshift += adjusted_mode->crtc_htotal;
3243 Serge 5882
	}
5883
 
5884
	if (INTEL_INFO(dev)->gen > 3)
5885
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
5886
 
5887
	I915_WRITE(HTOTAL(cpu_transcoder),
5888
		   (adjusted_mode->crtc_hdisplay - 1) |
5889
		   ((adjusted_mode->crtc_htotal - 1) << 16));
5890
	I915_WRITE(HBLANK(cpu_transcoder),
5891
		   (adjusted_mode->crtc_hblank_start - 1) |
5892
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
5893
	I915_WRITE(HSYNC(cpu_transcoder),
5894
		   (adjusted_mode->crtc_hsync_start - 1) |
5895
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
5896
 
5897
	I915_WRITE(VTOTAL(cpu_transcoder),
5898
		   (adjusted_mode->crtc_vdisplay - 1) |
4104 Serge 5899
		   ((crtc_vtotal - 1) << 16));
3243 Serge 5900
	I915_WRITE(VBLANK(cpu_transcoder),
5901
		   (adjusted_mode->crtc_vblank_start - 1) |
4104 Serge 5902
		   ((crtc_vblank_end - 1) << 16));
3243 Serge 5903
	I915_WRITE(VSYNC(cpu_transcoder),
5904
		   (adjusted_mode->crtc_vsync_start - 1) |
5905
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
5906
 
5907
	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
5908
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
5909
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
5910
	 * bits. */
5911
	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
5912
	    (pipe == PIPE_B || pipe == PIPE_C))
5913
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
5914
 
5915
	/* pipesrc controls the size that is scaled from, which should
5916
	 * always be the user's requested size.
5917
	 */
5918
	I915_WRITE(PIPESRC(pipe),
4560 Serge 5919
		   ((intel_crtc->config.pipe_src_w - 1) << 16) |
5920
		   (intel_crtc->config.pipe_src_h - 1));
3243 Serge 5921
}
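
/*
 * Worked example for the interlace handling above (illustrative timings):
 * for a hypothetical interlaced mode with crtc_htotal = 2200 and
 * crtc_hsync_start = 2008, the second field's vsync is shifted by
 * 2008 - 2200 / 2 = 908 pixels (or (2200 - 1) / 2 = 1099 on SDVO outputs);
 * a negative result is wrapped by adding crtc_htotal.  crtc_vtotal and
 * crtc_vblank_end are written one line short because the hardware inserts
 * the two extra halflines itself.
 */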
5922
 
4104 Serge 5923
static void intel_get_pipe_timings(struct intel_crtc *crtc,
5924
				   struct intel_crtc_config *pipe_config)
5925
{
5926
	struct drm_device *dev = crtc->base.dev;
5927
	struct drm_i915_private *dev_priv = dev->dev_private;
5928
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
5929
	uint32_t tmp;
5930
 
5931
	tmp = I915_READ(HTOTAL(cpu_transcoder));
5932
	pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
5933
	pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
5934
	tmp = I915_READ(HBLANK(cpu_transcoder));
5935
	pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
5936
	pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
5937
	tmp = I915_READ(HSYNC(cpu_transcoder));
5938
	pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
5939
	pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
5940
 
5941
	tmp = I915_READ(VTOTAL(cpu_transcoder));
5942
	pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
5943
	pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
5944
	tmp = I915_READ(VBLANK(cpu_transcoder));
5945
	pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
5946
	pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
5947
	tmp = I915_READ(VSYNC(cpu_transcoder));
5948
	pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
5949
	pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
5950
 
5951
	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
5952
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
5953
		pipe_config->adjusted_mode.crtc_vtotal += 1;
5954
		pipe_config->adjusted_mode.crtc_vblank_end += 1;
5955
	}
5956
 
5957
	tmp = I915_READ(PIPESRC(crtc->pipe));
4560 Serge 5958
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
5959
	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
5960
 
5961
	pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h;
5962
	pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
4104 Serge 5963
}
5964
 
5060 serge 5965
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
4104 Serge 5966
					     struct intel_crtc_config *pipe_config)
5967
{
5060 serge 5968
	mode->hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
5969
	mode->htotal = pipe_config->adjusted_mode.crtc_htotal;
5970
	mode->hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
5971
	mode->hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
4104 Serge 5972
 
5060 serge 5973
	mode->vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
5974
	mode->vtotal = pipe_config->adjusted_mode.crtc_vtotal;
5975
	mode->vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
5976
	mode->vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
4104 Serge 5977
 
5060 serge 5978
	mode->flags = pipe_config->adjusted_mode.flags;
4104 Serge 5979
 
5060 serge 5980
	mode->clock = pipe_config->adjusted_mode.crtc_clock;
5981
	mode->flags |= pipe_config->adjusted_mode.flags;
4104 Serge 5982
}
5983
 
3746 Serge 5984
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
5985
{
5986
	struct drm_device *dev = intel_crtc->base.dev;
5987
	struct drm_i915_private *dev_priv = dev->dev_private;
5988
	uint32_t pipeconf;
5989
 
4104 Serge 5990
	pipeconf = 0;
3746 Serge 5991
 
4104 Serge 5992
	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
5993
	    I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
5994
		pipeconf |= PIPECONF_ENABLE;
5995
 
4560 Serge 5996
	if (intel_crtc->config.double_wide)
3746 Serge 5997
		pipeconf |= PIPECONF_DOUBLE_WIDE;
5998
 
4104 Serge 5999
	/* only g4x and later have fancy bpc/dither controls */
6000
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
6001
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
6002
		if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30)
6003
			pipeconf |= PIPECONF_DITHER_EN |
3746 Serge 6004
				    PIPECONF_DITHER_TYPE_SP;
6005
 
4104 Serge 6006
		switch (intel_crtc->config.pipe_bpp) {
6007
		case 18:
6008
			pipeconf |= PIPECONF_6BPC;
6009
			break;
6010
		case 24:
6011
			pipeconf |= PIPECONF_8BPC;
6012
			break;
6013
		case 30:
6014
			pipeconf |= PIPECONF_10BPC;
6015
			break;
6016
		default:
6017
			/* Case prevented by intel_choose_pipe_bpp_dither. */
6018
			BUG();
3746 Serge 6019
		}
6020
	}
6021
 
6022
	if (HAS_PIPE_CXSR(dev)) {
6023
		if (intel_crtc->lowfreq_avail) {
6024
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
6025
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
6026
		} else {
6027
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
6028
		}
6029
	}
6030
 
5060 serge 6031
	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
6032
		if (INTEL_INFO(dev)->gen < 4 ||
6033
		    intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
3746 Serge 6034
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
6035
		else
5060 serge 6036
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
6037
	} else
3746 Serge 6038
		pipeconf |= PIPECONF_PROGRESSIVE;
6039
 
4104 Serge 6040
	if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range)
3746 Serge 6041
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
6042
 
6043
	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
6044
	POSTING_READ(PIPECONF(intel_crtc->pipe));
6045
}
6046
 
3031 serge 6047
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
6048
			      int x, int y,
6049
			      struct drm_framebuffer *fb)
6050
{
6051
	struct drm_device *dev = crtc->dev;
6052
	struct drm_i915_private *dev_priv = dev->dev_private;
6053
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6054
	int refclk, num_connectors = 0;
6055
	intel_clock_t clock, reduced_clock;
4104 Serge 6056
	bool ok, has_reduced_clock = false;
4560 Serge 6057
	bool is_lvds = false, is_dsi = false;
3031 serge 6058
	struct intel_encoder *encoder;
6059
	const intel_limit_t *limit;
6060
 
6061
	for_each_encoder_on_crtc(dev, crtc, encoder) {
6062
		switch (encoder->type) {
6063
		case INTEL_OUTPUT_LVDS:
6064
			is_lvds = true;
6065
			break;
4560 Serge 6066
		case INTEL_OUTPUT_DSI:
6067
			is_dsi = true;
6068
			break;
3031 serge 6069
		}
6070
 
6071
		num_connectors++;
6072
	}
6073
 
4560 Serge 6074
	if (is_dsi)
5060 serge 6075
		return 0;
4560 Serge 6076
 
6077
	if (!intel_crtc->config.clock_set) {
3031 serge 6078
	refclk = i9xx_get_refclk(crtc, num_connectors);
6079
 
6080
	/*
4560 Serge 6081
		 * Returns a set of divisors for the desired target clock with
6082
		 * the given refclk, or FALSE.  The returned values represent
6083
		 * the clock equation: refclk * (5 * (m1 + 2) + (m2 + 2)) / (n +
6084
		 * 2) / p1 / p2.
3031 serge 6085
	 */
6086
	limit = intel_limit(crtc, refclk);
4104 Serge 6087
	ok = dev_priv->display.find_dpll(limit, crtc,
6088
					 intel_crtc->config.port_clock,
6089
					 refclk, NULL, &clock);
4560 Serge 6090
		if (!ok) {
3031 serge 6091
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
6092
		return -EINVAL;
6093
	}
6094
 
6095
	if (is_lvds && dev_priv->lvds_downclock_avail) {
6096
		/*
4560 Serge 6097
			 * Ensure we match the reduced clock's P to the target
6098
			 * clock.  If the clocks don't match, we can't switch
6099
			 * the display clock by using the FP0/FP1. In such case
6100
			 * we will disable the LVDS downclock feature.
3031 serge 6101
		*/
4104 Serge 6102
		has_reduced_clock =
6103
			dev_priv->display.find_dpll(limit, crtc,
3031 serge 6104
						    dev_priv->lvds_downclock,
4104 Serge 6105
						    refclk, &clock,
3031 serge 6106
						    &reduced_clock);
6107
	}
3746 Serge 6108
	/* Compat-code for transition, will disappear. */
6109
		intel_crtc->config.dpll.n = clock.n;
6110
		intel_crtc->config.dpll.m1 = clock.m1;
6111
		intel_crtc->config.dpll.m2 = clock.m2;
6112
		intel_crtc->config.dpll.p1 = clock.p1;
6113
		intel_crtc->config.dpll.p2 = clock.p2;
6114
	}
3031 serge 6115
 
4560 Serge 6116
	if (IS_GEN2(dev)) {
4104 Serge 6117
		i8xx_update_pll(intel_crtc,
3243 Serge 6118
				has_reduced_clock ? &reduced_clock : NULL,
6119
				num_connectors);
5060 serge 6120
	} else if (IS_CHERRYVIEW(dev)) {
6121
		chv_update_pll(intel_crtc);
4560 Serge 6122
	} else if (IS_VALLEYVIEW(dev)) {
3746 Serge 6123
		vlv_update_pll(intel_crtc);
4560 Serge 6124
	} else {
3746 Serge 6125
		i9xx_update_pll(intel_crtc,
3031 serge 6126
				has_reduced_clock ? &reduced_clock : NULL,
6127
				num_connectors);
4560 Serge 6128
	}
3031 serge 6129
 
5060 serge 6130
	return 0;
2327 Serge 6131
}
6132
 
4104 Serge 6133
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
6134
				 struct intel_crtc_config *pipe_config)
6135
{
6136
	struct drm_device *dev = crtc->base.dev;
6137
	struct drm_i915_private *dev_priv = dev->dev_private;
6138
	uint32_t tmp;
6139
 
4560 Serge 6140
	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
6141
		return;
6142
 
4104 Serge 6143
	tmp = I915_READ(PFIT_CONTROL);
6144
	if (!(tmp & PFIT_ENABLE))
6145
		return;
6146
 
6147
	/* Check whether the pfit is attached to our pipe. */
6148
	if (INTEL_INFO(dev)->gen < 4) {
6149
		if (crtc->pipe != PIPE_B)
6150
			return;
6151
	} else {
6152
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
6153
			return;
6154
	}
6155
 
6156
	pipe_config->gmch_pfit.control = tmp;
6157
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
6158
	if (INTEL_INFO(dev)->gen < 5)
6159
		pipe_config->gmch_pfit.lvds_border_bits =
6160
			I915_READ(LVDS) & LVDS_BORDER_ENABLE;
6161
}
6162
 
4398 Serge 6163
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
6164
			       struct intel_crtc_config *pipe_config)
6165
{
6166
	struct drm_device *dev = crtc->base.dev;
6167
	struct drm_i915_private *dev_priv = dev->dev_private;
6168
	int pipe = pipe_config->cpu_transcoder;
6169
	intel_clock_t clock;
6170
	u32 mdiv;
6171
	int refclk = 100000;
6172
 
5060 serge 6173
	/* In case of MIPI DPLL will not even be used */
6174
	if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
6175
		return;
6176
 
4398 Serge 6177
	mutex_lock(&dev_priv->dpio_lock);
4560 Serge 6178
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
4398 Serge 6179
	mutex_unlock(&dev_priv->dpio_lock);
6180
 
6181
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
6182
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
6183
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
6184
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
6185
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
6186
 
4560 Serge 6187
	vlv_clock(refclk, &clock);
4398 Serge 6188
 
4560 Serge 6189
	/* clock.dot is the fast clock */
6190
	pipe_config->port_clock = clock.dot / 5;
4398 Serge 6191
}
6192
 
5060 serge 6193
static void i9xx_get_plane_config(struct intel_crtc *crtc,
6194
				  struct intel_plane_config *plane_config)
6195
{
6196
	struct drm_device *dev = crtc->base.dev;
6197
	struct drm_i915_private *dev_priv = dev->dev_private;
6198
	u32 val, base, offset;
6199
	int pipe = crtc->pipe, plane = crtc->plane;
6200
	int fourcc, pixel_format;
6201
	int aligned_height;
6202
 
6203
	crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
6204
	if (!crtc->base.primary->fb) {
6205
		DRM_DEBUG_KMS("failed to alloc fb\n");
6206
		return;
6207
	}
6208
 
6209
	val = I915_READ(DSPCNTR(plane));
6210
 
6211
	if (INTEL_INFO(dev)->gen >= 4)
6212
		if (val & DISPPLANE_TILED)
6213
			plane_config->tiled = true;
6214
 
6215
	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
6216
	fourcc = intel_format_to_fourcc(pixel_format);
6217
	crtc->base.primary->fb->pixel_format = fourcc;
6218
	crtc->base.primary->fb->bits_per_pixel =
6219
		drm_format_plane_cpp(fourcc, 0) * 8;
6220
 
6221
	if (INTEL_INFO(dev)->gen >= 4) {
6222
		if (plane_config->tiled)
6223
			offset = I915_READ(DSPTILEOFF(plane));
6224
		else
6225
			offset = I915_READ(DSPLINOFF(plane));
6226
		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
6227
	} else {
6228
		base = I915_READ(DSPADDR(plane));
6229
	}
6230
	plane_config->base = base;
6231
 
6232
	val = I915_READ(PIPESRC(pipe));
6233
	crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
6234
	crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
6235
 
6236
	val = I915_READ(DSPSTRIDE(pipe));
6237
	crtc->base.primary->fb->pitches[0] = val & 0xffffff80;
6238
 
6239
	aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
6240
					    plane_config->tiled);
6241
 
6242
	plane_config->size = 16*1024*1024;
6243
 
6244
 
6245
	DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
6246
		      pipe, plane, crtc->base.primary->fb->width,
6247
		      crtc->base.primary->fb->height,
6248
		      crtc->base.primary->fb->bits_per_pixel, base,
6249
		      crtc->base.primary->fb->pitches[0],
6250
		      plane_config->size);
6251
 
6252
}
6253
 
6254
static void chv_crtc_clock_get(struct intel_crtc *crtc,
6255
			       struct intel_crtc_config *pipe_config)
6256
{
6257
	struct drm_device *dev = crtc->base.dev;
6258
	struct drm_i915_private *dev_priv = dev->dev_private;
6259
	int pipe = pipe_config->cpu_transcoder;
6260
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
6261
	intel_clock_t clock;
6262
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2;
6263
	int refclk = 100000;
6264
 
6265
	mutex_lock(&dev_priv->dpio_lock);
6266
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
6267
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
6268
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
6269
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
6270
	mutex_unlock(&dev_priv->dpio_lock);
6271
 
6272
	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
6273
	clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff);
6274
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
6275
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
6276
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
6277
 
6278
	chv_clock(refclk, &clock);
6279
 
6280
	/* clock.dot is the fast clock */
6281
	pipe_config->port_clock = clock.dot / 5;
6282
}
6283
 
3746 Serge 6284
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
6285
				 struct intel_crtc_config *pipe_config)
6286
{
6287
	struct drm_device *dev = crtc->base.dev;
6288
	struct drm_i915_private *dev_priv = dev->dev_private;
6289
	uint32_t tmp;
6290
 
5060 serge 6291
	if (!intel_display_power_enabled(dev_priv,
6292
					 POWER_DOMAIN_PIPE(crtc->pipe)))
6293
		return false;
6294
 
4104 Serge 6295
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
6296
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
6297
 
3746 Serge 6298
	tmp = I915_READ(PIPECONF(crtc->pipe));
6299
	if (!(tmp & PIPECONF_ENABLE))
6300
		return false;
6301
 
4280 Serge 6302
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
6303
		switch (tmp & PIPECONF_BPC_MASK) {
6304
		case PIPECONF_6BPC:
6305
			pipe_config->pipe_bpp = 18;
6306
			break;
6307
		case PIPECONF_8BPC:
6308
			pipe_config->pipe_bpp = 24;
6309
			break;
6310
		case PIPECONF_10BPC:
6311
			pipe_config->pipe_bpp = 30;
6312
			break;
6313
		default:
6314
			break;
6315
		}
6316
	}
6317
 
5060 serge 6318
	if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT))
6319
		pipe_config->limited_color_range = true;
6320
 
4560 Serge 6321
	if (INTEL_INFO(dev)->gen < 4)
6322
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
6323
 
4104 Serge 6324
	intel_get_pipe_timings(crtc, pipe_config);
6325
 
6326
	i9xx_get_pfit_config(crtc, pipe_config);
6327
 
6328
	if (INTEL_INFO(dev)->gen >= 4) {
6329
		tmp = I915_READ(DPLL_MD(crtc->pipe));
6330
		pipe_config->pixel_multiplier =
6331
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
6332
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
6333
		pipe_config->dpll_hw_state.dpll_md = tmp;
6334
	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
6335
		tmp = I915_READ(DPLL(crtc->pipe));
6336
		pipe_config->pixel_multiplier =
6337
			((tmp & SDVO_MULTIPLIER_MASK)
6338
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
6339
	} else {
6340
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
6341
		 * port and will be fixed up in the encoder->get_config
6342
		 * function. */
6343
		pipe_config->pixel_multiplier = 1;
6344
	}
6345
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
6346
	if (!IS_VALLEYVIEW(dev)) {
6347
		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
6348
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
6349
	} else {
6350
		/* Mask out read-only status bits. */
6351
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
6352
						     DPLL_PORTC_READY_MASK |
6353
						     DPLL_PORTB_READY_MASK);
6354
	}
6355
 
5060 serge 6356
	if (IS_CHERRYVIEW(dev))
6357
		chv_crtc_clock_get(crtc, pipe_config);
6358
	else if (IS_VALLEYVIEW(dev))
4560 Serge 6359
		vlv_crtc_clock_get(crtc, pipe_config);
6360
	else
6361
		i9xx_crtc_clock_get(crtc, pipe_config);
6362
 
3746 Serge 6363
	return true;
6364
}
6365
 
3243 Serge 6366
static void ironlake_init_pch_refclk(struct drm_device *dev)
2327 Serge 6367
{
6368
	struct drm_i915_private *dev_priv = dev->dev_private;
6369
	struct drm_mode_config *mode_config = &dev->mode_config;
6370
	struct intel_encoder *encoder;
3746 Serge 6371
	u32 val, final;
2327 Serge 6372
	bool has_lvds = false;
2342 Serge 6373
	bool has_cpu_edp = false;
6374
	bool has_panel = false;
6375
	bool has_ck505 = false;
6376
	bool can_ssc = false;
2327 Serge 6377
 
6378
	/* We need to take the global config into account */
6379
	list_for_each_entry(encoder, &mode_config->encoder_list,
6380
			    base.head) {
6381
		switch (encoder->type) {
6382
		case INTEL_OUTPUT_LVDS:
2342 Serge 6383
			has_panel = true;
2327 Serge 6384
			has_lvds = true;
2342 Serge 6385
			break;
2327 Serge 6386
		case INTEL_OUTPUT_EDP:
2342 Serge 6387
			has_panel = true;
4104 Serge 6388
			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
2342 Serge 6389
				has_cpu_edp = true;
2327 Serge 6390
			break;
6391
		}
6392
	}
2342 Serge 6393
 
6394
	if (HAS_PCH_IBX(dev)) {
4104 Serge 6395
		has_ck505 = dev_priv->vbt.display_clock_mode;
2342 Serge 6396
		can_ssc = has_ck505;
6397
	} else {
6398
		has_ck505 = false;
6399
		can_ssc = true;
2327 Serge 6400
	}
6401
 
4104 Serge 6402
	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
6403
		      has_panel, has_lvds, has_ck505);
2342 Serge 6404
 
2327 Serge 6405
	/* Ironlake: try to setup display ref clock before DPLL
6406
	 * enabling. This is only under driver's control after
6407
	 * PCH B stepping, previous chipset stepping should be
6408
	 * ignoring this setting.
6409
	 */
3746 Serge 6410
	val = I915_READ(PCH_DREF_CONTROL);
6411
 
6412
	/* As we must carefully and slowly disable/enable each source in turn,
6413
	 * compute the final state we want first and check if we need to
6414
	 * make any changes at all.
6415
	 */
6416
	final = val;
6417
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
6418
	if (has_ck505)
6419
		final |= DREF_NONSPREAD_CK505_ENABLE;
6420
	else
6421
		final |= DREF_NONSPREAD_SOURCE_ENABLE;
6422
 
6423
	final &= ~DREF_SSC_SOURCE_MASK;
6424
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
6425
	final &= ~DREF_SSC1_ENABLE;
6426
 
6427
	if (has_panel) {
6428
		final |= DREF_SSC_SOURCE_ENABLE;
6429
 
6430
		if (intel_panel_use_ssc(dev_priv) && can_ssc)
6431
			final |= DREF_SSC1_ENABLE;
6432
 
6433
		if (has_cpu_edp) {
6434
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
6435
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
6436
			else
6437
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
6438
		} else
6439
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6440
	} else {
6441
		final |= DREF_SSC_SOURCE_DISABLE;
6442
		final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6443
	}
6444
 
6445
	if (final == val)
6446
		return;
6447
 
2327 Serge 6448
	/* Always enable nonspread source */
3746 Serge 6449
	val &= ~DREF_NONSPREAD_SOURCE_MASK;
2342 Serge 6450
 
6451
	if (has_ck505)
3746 Serge 6452
		val |= DREF_NONSPREAD_CK505_ENABLE;
2342 Serge 6453
	else
3746 Serge 6454
		val |= DREF_NONSPREAD_SOURCE_ENABLE;
2342 Serge 6455
 
6456
	if (has_panel) {
3746 Serge 6457
		val &= ~DREF_SSC_SOURCE_MASK;
6458
		val |= DREF_SSC_SOURCE_ENABLE;
2327 Serge 6459
 
2342 Serge 6460
		/* SSC must be turned on before enabling the CPU output  */
6461
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
6462
			DRM_DEBUG_KMS("Using SSC on panel\n");
3746 Serge 6463
			val |= DREF_SSC1_ENABLE;
3031 serge 6464
		} else
3746 Serge 6465
			val &= ~DREF_SSC1_ENABLE;
2327 Serge 6466
 
2342 Serge 6467
		/* Get SSC going before enabling the outputs */
3746 Serge 6468
		I915_WRITE(PCH_DREF_CONTROL, val);
2327 Serge 6469
		POSTING_READ(PCH_DREF_CONTROL);
6470
		udelay(200);
2342 Serge 6471
 
3746 Serge 6472
		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
2327 Serge 6473
 
6474
		/* Enable CPU source on CPU attached eDP */
2342 Serge 6475
		if (has_cpu_edp) {
6476
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
6477
				DRM_DEBUG_KMS("Using SSC on eDP\n");
3746 Serge 6478
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5060 serge 6479
			} else
3746 Serge 6480
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
2342 Serge 6481
		} else
3746 Serge 6482
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
2342 Serge 6483
 
3746 Serge 6484
		I915_WRITE(PCH_DREF_CONTROL, val);
2342 Serge 6485
		POSTING_READ(PCH_DREF_CONTROL);
6486
		udelay(200);
2327 Serge 6487
	} else {
2342 Serge 6488
		DRM_DEBUG_KMS("Disabling SSC entirely\n");
6489
 
3746 Serge 6490
		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
2342 Serge 6491
 
6492
		/* Turn off CPU output */
3746 Serge 6493
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
2342 Serge 6494
 
3746 Serge 6495
		I915_WRITE(PCH_DREF_CONTROL, val);
2327 Serge 6496
		POSTING_READ(PCH_DREF_CONTROL);
6497
		udelay(200);
2342 Serge 6498
 
6499
		/* Turn off the SSC source */
3746 Serge 6500
		val &= ~DREF_SSC_SOURCE_MASK;
6501
		val |= DREF_SSC_SOURCE_DISABLE;
2342 Serge 6502
 
6503
		/* Turn off SSC1 */
3746 Serge 6504
		val &= ~DREF_SSC1_ENABLE;
2342 Serge 6505
 
3746 Serge 6506
		I915_WRITE(PCH_DREF_CONTROL, val);
2342 Serge 6507
		POSTING_READ(PCH_DREF_CONTROL);
6508
		udelay(200);
2327 Serge 6509
	}
3746 Serge 6510
 
6511
	BUG_ON(val != final);
2327 Serge 6512
}
6513
 
4104 Serge 6514
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
3243 Serge 6515
{
4104 Serge 6516
	uint32_t tmp;
3243 Serge 6517
 
6518
		tmp = I915_READ(SOUTH_CHICKEN2);
6519
		tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
6520
		I915_WRITE(SOUTH_CHICKEN2, tmp);
6521
 
6522
		if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
6523
				       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
6524
			DRM_ERROR("FDI mPHY reset assert timeout\n");
6525
 
6526
		tmp = I915_READ(SOUTH_CHICKEN2);
6527
		tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
6528
		I915_WRITE(SOUTH_CHICKEN2, tmp);
6529
 
6530
		if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
4104 Serge 6531
				FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
3243 Serge 6532
			DRM_ERROR("FDI mPHY reset de-assert timeout\n");
4539 Serge 6533
}
3243 Serge 6534
 
4104 Serge 6535
/* WaMPhyProgramming:hsw */
6536
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
6537
{
6538
	uint32_t tmp;
6539
 
3243 Serge 6540
	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
6541
	tmp &= ~(0xFF << 24);
6542
	tmp |= (0x12 << 24);
6543
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
6544
 
6545
	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
6546
	tmp |= (1 << 11);
6547
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
6548
 
6549
	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
6550
	tmp |= (1 << 11);
6551
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
6552
 
6553
	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
6554
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
6555
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
6556
 
6557
	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
6558
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
6559
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
6560
 
6561
		tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
6562
		tmp &= ~(7 << 13);
6563
		tmp |= (5 << 13);
6564
		intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
6565
 
6566
		tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
6567
		tmp &= ~(7 << 13);
6568
		tmp |= (5 << 13);
6569
		intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
6570
 
6571
	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
6572
	tmp &= ~0xFF;
6573
	tmp |= 0x1C;
6574
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
6575
 
6576
	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
6577
	tmp &= ~0xFF;
6578
	tmp |= 0x1C;
6579
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
6580
 
6581
	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
6582
	tmp &= ~(0xFF << 16);
6583
	tmp |= (0x1C << 16);
6584
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
6585
 
6586
	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
6587
	tmp &= ~(0xFF << 16);
6588
	tmp |= (0x1C << 16);
6589
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
6590
 
6591
		tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
6592
		tmp |= (1 << 27);
6593
		intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
6594
 
6595
		tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
6596
		tmp |= (1 << 27);
6597
		intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
6598
 
6599
		tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
6600
		tmp &= ~(0xF << 28);
6601
		tmp |= (4 << 28);
6602
		intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
6603
 
6604
		tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
6605
		tmp &= ~(0xF << 28);
6606
		tmp |= (4 << 28);
6607
		intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
4539 Serge 6608
}
3243 Serge 6609
 
4104 Serge 6610
/* Implements 3 different sequences from BSpec chapter "Display iCLK
6611
 * Programming" based on the parameters passed:
6612
 * - Sequence to enable CLKOUT_DP
6613
 * - Sequence to enable CLKOUT_DP without spread
6614
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
6615
 */
6616
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
6617
				 bool with_fdi)
6618
{
6619
	struct drm_i915_private *dev_priv = dev->dev_private;
6620
	uint32_t reg, tmp;
3480 Serge 6621
 
4104 Serge 6622
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
6623
		with_spread = true;
6624
	if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
6625
		 with_fdi, "LP PCH doesn't have FDI\n"))
6626
		with_fdi = false;
6627
 
6628
	mutex_lock(&dev_priv->dpio_lock);
6629
 
6630
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
6631
	tmp &= ~SBI_SSCCTL_DISABLE;
6632
	tmp |= SBI_SSCCTL_PATHALT;
6633
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
6634
 
6635
	udelay(24);
6636
 
6637
	if (with_spread) {
6638
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
6639
		tmp &= ~SBI_SSCCTL_PATHALT;
6640
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
6641
 
6642
		if (with_fdi) {
6643
			lpt_reset_fdi_mphy(dev_priv);
6644
			lpt_program_fdi_mphy(dev_priv);
6645
		}
6646
	}
6647
 
6648
	reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
6649
	       SBI_GEN0 : SBI_DBUFF0;
6650
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
6651
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
6652
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
6653
 
3480 Serge 6654
	mutex_unlock(&dev_priv->dpio_lock);
3243 Serge 6655
}
6656
 
4104 Serge 6657
/* Sequence to disable CLKOUT_DP */
6658
static void lpt_disable_clkout_dp(struct drm_device *dev)
6659
{
6660
	struct drm_i915_private *dev_priv = dev->dev_private;
6661
	uint32_t reg, tmp;
6662
 
6663
	mutex_lock(&dev_priv->dpio_lock);
6664
 
6665
	reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
6666
	       SBI_GEN0 : SBI_DBUFF0;
6667
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
6668
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
6669
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
6670
 
6671
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
6672
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
6673
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
6674
			tmp |= SBI_SSCCTL_PATHALT;
6675
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
6676
			udelay(32);
6677
		}
6678
		tmp |= SBI_SSCCTL_DISABLE;
6679
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
6680
	}
6681
 
6682
	mutex_unlock(&dev_priv->dpio_lock);
6683
}
6684
 
6685
static void lpt_init_pch_refclk(struct drm_device *dev)
6686
{
6687
	struct drm_mode_config *mode_config = &dev->mode_config;
6688
	struct intel_encoder *encoder;
6689
	bool has_vga = false;
6690
 
6691
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
6692
		switch (encoder->type) {
6693
		case INTEL_OUTPUT_ANALOG:
6694
			has_vga = true;
6695
			break;
6696
		}
6697
	}
6698
 
6699
	if (has_vga)
6700
		lpt_enable_clkout_dp(dev, true, true);
6701
	else
6702
		lpt_disable_clkout_dp(dev);
6703
}
6704
 
3243 Serge 6705
/*
6706
 * Initialize reference clocks when the driver loads
6707
 */
6708
void intel_init_pch_refclk(struct drm_device *dev)
6709
{
6710
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
6711
		ironlake_init_pch_refclk(dev);
6712
	else if (HAS_PCH_LPT(dev))
6713
		lpt_init_pch_refclk(dev);
6714
}
6715
 
2342 Serge 6716
static int ironlake_get_refclk(struct drm_crtc *crtc)
6717
{
6718
	struct drm_device *dev = crtc->dev;
6719
	struct drm_i915_private *dev_priv = dev->dev_private;
6720
	struct intel_encoder *encoder;
6721
	int num_connectors = 0;
6722
	bool is_lvds = false;
6723
 
3031 serge 6724
	for_each_encoder_on_crtc(dev, crtc, encoder) {
2342 Serge 6725
		switch (encoder->type) {
6726
		case INTEL_OUTPUT_LVDS:
6727
			is_lvds = true;
6728
			break;
6729
		}
6730
		num_connectors++;
6731
	}
6732
 
6733
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4560 Serge 6734
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
4104 Serge 6735
			      dev_priv->vbt.lvds_ssc_freq);
4560 Serge 6736
		return dev_priv->vbt.lvds_ssc_freq;
2342 Serge 6737
	}
6738
 
6739
	return 120000;
6740
}
6741
 
4104 Serge 6742
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
3031 serge 6743
{
6744
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
6745
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6746
	int pipe = intel_crtc->pipe;
6747
	uint32_t val;
6748
 
4104 Serge 6749
	val = 0;
3031 serge 6750
 
3746 Serge 6751
	switch (intel_crtc->config.pipe_bpp) {
3031 serge 6752
	case 18:
3480 Serge 6753
		val |= PIPECONF_6BPC;
3031 serge 6754
		break;
6755
	case 24:
3480 Serge 6756
		val |= PIPECONF_8BPC;
3031 serge 6757
		break;
6758
	case 30:
3480 Serge 6759
		val |= PIPECONF_10BPC;
3031 serge 6760
		break;
6761
	case 36:
3480 Serge 6762
		val |= PIPECONF_12BPC;
3031 serge 6763
		break;
6764
	default:
3243 Serge 6765
		/* Case prevented by intel_choose_pipe_bpp_dither. */
6766
		BUG();
3031 serge 6767
	}
6768
 
4104 Serge 6769
	if (intel_crtc->config.dither)
3031 serge 6770
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
6771
 
4104 Serge 6772
	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3031 serge 6773
		val |= PIPECONF_INTERLACED_ILK;
6774
	else
6775
		val |= PIPECONF_PROGRESSIVE;
6776
 
3746 Serge 6777
	if (intel_crtc->config.limited_color_range)
3480 Serge 6778
		val |= PIPECONF_COLOR_RANGE_SELECT;
6779
 
3031 serge 6780
	I915_WRITE(PIPECONF(pipe), val);
6781
	POSTING_READ(PIPECONF(pipe));
6782
}
6783
 
3480 Serge 6784
/*
6785
 * Set up the pipe CSC unit.
6786
 *
6787
 * Currently only full range RGB to limited range RGB conversion
6788
 * is supported, but eventually this should handle various
6789
 * RGB<->YCbCr scenarios as well.
6790
 */
3746 Serge 6791
static void intel_set_pipe_csc(struct drm_crtc *crtc)
3480 Serge 6792
{
6793
	struct drm_device *dev = crtc->dev;
6794
	struct drm_i915_private *dev_priv = dev->dev_private;
6795
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6796
	int pipe = intel_crtc->pipe;
6797
	uint16_t coeff = 0x7800; /* 1.0 */
6798
 
6799
	/*
6800
	 * TODO: Check what kind of values actually come out of the pipe
6801
	 * with these coeff/postoff values and adjust to get the best
6802
	 * accuracy. Perhaps we even need to take the bpc value into
6803
	 * consideration.
6804
	 */
6805
 
3746 Serge 6806
	if (intel_crtc->config.limited_color_range)
3480 Serge 6807
		coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
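	/*
	 * For reference, the expression above evaluates to
	 * (235 - 16) * 4096 / 255 = 3517 = 0xdbd, and the & 0xff8 mask trims
	 * that to 0xdb8.  The intent is the usual full-to-limited range scale
	 * of (235 - 16) / 255, roughly 0.859; how the hardware decodes the
	 * coefficient bits themselves is left to the register documentation.
	 */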
6808
 
6809
	/*
6810
	 * GY/GU and RY/RU should be the other way around according
6811
	 * to BSpec, but reality doesn't agree. Just set them up in
6812
	 * a way that results in the correct picture.
6813
	 */
6814
	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
6815
	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
6816
 
6817
	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
6818
	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
6819
 
6820
	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
6821
	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
6822
 
6823
	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
6824
	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
6825
	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
6826
 
6827
	if (INTEL_INFO(dev)->gen > 6) {
6828
		uint16_t postoff = 0;
6829
 
3746 Serge 6830
		if (intel_crtc->config.limited_color_range)
4398 Serge 6831
			postoff = (16 * (1 << 12) / 255) & 0x1fff;
3480 Serge 6832
 
6833
		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
6834
		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
6835
		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
6836
 
6837
		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
6838
	} else {
6839
		uint32_t mode = CSC_MODE_YUV_TO_RGB;
6840
 
3746 Serge 6841
		if (intel_crtc->config.limited_color_range)
3480 Serge 6842
			mode |= CSC_BLACK_SCREEN_OFFSET;
6843
 
6844
		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
6845
	}
6846
}
6847
 
4104 Serge 6848
static void haswell_set_pipeconf(struct drm_crtc *crtc)
3243 Serge 6849
{
4560 Serge 6850
	struct drm_device *dev = crtc->dev;
6851
	struct drm_i915_private *dev_priv = dev->dev_private;
3243 Serge 6852
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4560 Serge 6853
	enum pipe pipe = intel_crtc->pipe;
3746 Serge 6854
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
3243 Serge 6855
	uint32_t val;
6856
 
4104 Serge 6857
	val = 0;
3243 Serge 6858
 
4560 Serge 6859
	if (IS_HASWELL(dev) && intel_crtc->config.dither)
3243 Serge 6860
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
6861
 
4104 Serge 6862
	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3243 Serge 6863
		val |= PIPECONF_INTERLACED_ILK;
6864
	else
6865
		val |= PIPECONF_PROGRESSIVE;
6866
 
6867
	I915_WRITE(PIPECONF(cpu_transcoder), val);
6868
	POSTING_READ(PIPECONF(cpu_transcoder));
4104 Serge 6869
 
6870
	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
6871
	POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
4560 Serge 6872
 
6873
	if (IS_BROADWELL(dev)) {
6874
		val = 0;
6875
 
6876
		switch (intel_crtc->config.pipe_bpp) {
6877
		case 18:
6878
			val |= PIPEMISC_DITHER_6_BPC;
6879
			break;
6880
		case 24:
6881
			val |= PIPEMISC_DITHER_8_BPC;
6882
			break;
6883
		case 30:
6884
			val |= PIPEMISC_DITHER_10_BPC;
6885
			break;
6886
		case 36:
6887
			val |= PIPEMISC_DITHER_12_BPC;
6888
			break;
6889
		default:
6890
			/* Case prevented by pipe_config_set_bpp. */
6891
			BUG();
6892
		}
6893
 
6894
		if (intel_crtc->config.dither)
6895
			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
6896
 
6897
		I915_WRITE(PIPEMISC(pipe), val);
6898
	}
3243 Serge 6899
}
6900
 
3031 serge 6901
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
6902
				    intel_clock_t *clock,
6903
				    bool *has_reduced_clock,
6904
				    intel_clock_t *reduced_clock)
6905
{
6906
	struct drm_device *dev = crtc->dev;
6907
	struct drm_i915_private *dev_priv = dev->dev_private;
6908
	struct intel_encoder *intel_encoder;
6909
	int refclk;
6910
	const intel_limit_t *limit;
4104 Serge 6911
	bool ret, is_lvds = false;
3031 serge 6912
 
6913
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
6914
		switch (intel_encoder->type) {
6915
		case INTEL_OUTPUT_LVDS:
6916
			is_lvds = true;
6917
			break;
6918
		}
6919
	}
6920
 
6921
	refclk = ironlake_get_refclk(crtc);
6922
 
6923
	/*
6924
	 * Returns a set of divisors for the desired target clock with the given
6925
	 * refclk, or FALSE.  The returned values represent the clock equation:
6926
	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
6927
	 */
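	/*
	 * A quick sanity check of that equation with made-up divisors (not
	 * taken from any limit table): refclk = 120000 kHz, m1 = 12, m2 = 9,
	 * n = 3, p1 = 2, p2 = 10 gives
	 *   120000 * (5 * (12 + 2) + (9 + 2)) / (3 + 2) / 2 / 10 = 97200 kHz.
	 */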
6928
	limit = intel_limit(crtc, refclk);
4104 Serge 6929
	ret = dev_priv->display.find_dpll(limit, crtc,
6930
					  to_intel_crtc(crtc)->config.port_clock,
6931
					  refclk, NULL, clock);
3031 serge 6932
	if (!ret)
6933
		return false;
6934
 
6935
	if (is_lvds && dev_priv->lvds_downclock_avail) {
6936
		/*
6937
		 * Ensure we match the reduced clock's P to the target clock.
6938
		 * If the clocks don't match, we can't switch the display clock
6939
		 * by using the FP0/FP1. In such case we will disable the LVDS
6940
		 * downclock feature.
6941
		*/
4104 Serge 6942
		*has_reduced_clock =
6943
			dev_priv->display.find_dpll(limit, crtc,
3031 serge 6944
						     dev_priv->lvds_downclock,
4104 Serge 6945
						    refclk, clock,
3031 serge 6946
						     reduced_clock);
6947
	}
6948
 
6949
	return true;
6950
}
6951
 
3243 Serge 6952
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
6953
{
6954
	/*
6955
	 * Account for spread spectrum to avoid
6956
	 * oversubscribing the link. Max center spread
6957
	 * is 2.5%; use 5% for safety's sake.
6958
	 */
6959
	u32 bps = target_clock * bpp * 21 / 20;
5060 serge 6960
	return DIV_ROUND_UP(bps, link_bw * 8);
3243 Serge 6961
}
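
/*
 * Worked example with illustrative numbers (not tied to any particular
 * panel): target_clock = 148500 kHz at bpp = 24 over a link_bw of 270000
 * gives bps = 148500 * 24 * 21 / 20 = 3742200, and
 * DIV_ROUND_UP(3742200, 270000 * 8) = 2, so two lanes suffice once the
 * 5% spread margin is included.
 */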
6962
 
4104 Serge 6963
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
2327 Serge 6964
{
4104 Serge 6965
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
3746 Serge 6966
}
6967
 
3243 Serge 6968
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
4104 Serge 6969
				      u32 *fp,
3746 Serge 6970
				      intel_clock_t *reduced_clock, u32 *fp2)
3243 Serge 6971
{
6972
	struct drm_crtc *crtc = &intel_crtc->base;
6973
	struct drm_device *dev = crtc->dev;
6974
	struct drm_i915_private *dev_priv = dev->dev_private;
6975
	struct intel_encoder *intel_encoder;
6976
	uint32_t dpll;
3746 Serge 6977
	int factor, num_connectors = 0;
4104 Serge 6978
	bool is_lvds = false, is_sdvo = false;
3243 Serge 6979
 
6980
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
6981
		switch (intel_encoder->type) {
6982
		case INTEL_OUTPUT_LVDS:
6983
			is_lvds = true;
6984
			break;
6985
		case INTEL_OUTPUT_SDVO:
6986
		case INTEL_OUTPUT_HDMI:
6987
			is_sdvo = true;
6988
			break;
6989
		}
6990
 
6991
		num_connectors++;
6992
	}
6993
 
2327 Serge 6994
    /* Enable autotuning of the PLL clock (if permissible) */
6995
    factor = 21;
6996
    if (is_lvds) {
6997
        if ((intel_panel_use_ssc(dev_priv) &&
4560 Serge 6998
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
3746 Serge 6999
		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
2327 Serge 7000
            factor = 25;
4104 Serge 7001
	} else if (intel_crtc->config.sdvo_tv_clock)
2327 Serge 7002
        factor = 20;
7003
 
4104 Serge 7004
	if (ironlake_needs_fb_cb_tune(&intel_crtc->config.dpll, factor))
3746 Serge 7005
		*fp |= FP_CB_TUNE;
2327 Serge 7006
 
3746 Serge 7007
	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
7008
		*fp2 |= FP_CB_TUNE;
7009
 
2327 Serge 7010
    dpll = 0;
7011
 
7012
    if (is_lvds)
7013
        dpll |= DPLLB_MODE_LVDS;
7014
    else
7015
        dpll |= DPLLB_MODE_DAC_SERIAL;
4104 Serge 7016
 
3746 Serge 7017
	dpll |= (intel_crtc->config.pixel_multiplier - 1)
7018
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
2327 Serge 7019
 
4104 Serge 7020
	if (is_sdvo)
7021
		dpll |= DPLL_SDVO_HIGH_SPEED;
7022
	if (intel_crtc->config.has_dp_encoder)
7023
		dpll |= DPLL_SDVO_HIGH_SPEED;
7024
 
2327 Serge 7025
    /* compute bitmask from p1 value */
4104 Serge 7026
	dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
2327 Serge 7027
    /* also FPA1 */
4104 Serge 7028
	dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
2327 Serge 7029
 
4104 Serge 7030
	switch (intel_crtc->config.dpll.p2) {
2327 Serge 7031
    case 5:
7032
        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7033
        break;
7034
    case 7:
7035
        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7036
        break;
7037
    case 10:
7038
        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7039
        break;
7040
    case 14:
7041
        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7042
        break;
7043
    }
7044
 
4104 Serge 7045
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
2327 Serge 7046
        dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7047
    else
7048
        dpll |= PLL_REF_INPUT_DREFCLK;
7049
 
4104 Serge 7050
	return dpll | DPLL_VCO_ENABLE;
3243 Serge 7051
}
7052
 
7053
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
7054
				  int x, int y,
7055
				  struct drm_framebuffer *fb)
7056
{
7057
	struct drm_device *dev = crtc->dev;
7058
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7059
	int num_connectors = 0;
7060
	intel_clock_t clock, reduced_clock;
4104 Serge 7061
	u32 dpll = 0, fp = 0, fp2 = 0;
3243 Serge 7062
	bool ok, has_reduced_clock = false;
3746 Serge 7063
	bool is_lvds = false;
3243 Serge 7064
	struct intel_encoder *encoder;
4104 Serge 7065
	struct intel_shared_dpll *pll;
3243 Serge 7066
 
7067
	for_each_encoder_on_crtc(dev, crtc, encoder) {
7068
		switch (encoder->type) {
7069
		case INTEL_OUTPUT_LVDS:
7070
			is_lvds = true;
7071
			break;
7072
		}
7073
 
7074
		num_connectors++;
7075
	}
7076
 
7077
	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
7078
	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
7079
 
4104 Serge 7080
	ok = ironlake_compute_clocks(crtc, &clock,
3243 Serge 7081
				     &has_reduced_clock, &reduced_clock);
4104 Serge 7082
	if (!ok && !intel_crtc->config.clock_set) {
3243 Serge 7083
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
7084
		return -EINVAL;
7085
	}
3746 Serge 7086
	/* Compat-code for transition, will disappear. */
7087
	if (!intel_crtc->config.clock_set) {
7088
		intel_crtc->config.dpll.n = clock.n;
7089
		intel_crtc->config.dpll.m1 = clock.m1;
7090
		intel_crtc->config.dpll.m2 = clock.m2;
7091
		intel_crtc->config.dpll.p1 = clock.p1;
7092
		intel_crtc->config.dpll.p2 = clock.p2;
7093
	}
3243 Serge 7094
 
4104 Serge 7095
	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
7096
	if (intel_crtc->config.has_pch_encoder) {
7097
		fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
3243 Serge 7098
		if (has_reduced_clock)
4104 Serge 7099
			fp2 = i9xx_dpll_compute_fp(&reduced_clock);
3243 Serge 7100
 
4104 Serge 7101
		dpll = ironlake_compute_dpll(intel_crtc,
7102
					     &fp, &reduced_clock,
5060 serge 7103
					     has_reduced_clock ? &fp2 : NULL);
3243 Serge 7104
 
4104 Serge 7105
		intel_crtc->config.dpll_hw_state.dpll = dpll;
7106
		intel_crtc->config.dpll_hw_state.fp0 = fp;
7107
		if (has_reduced_clock)
7108
			intel_crtc->config.dpll_hw_state.fp1 = fp2;
7109
		else
7110
			intel_crtc->config.dpll_hw_state.fp1 = fp;
2327 Serge 7111
 
4104 Serge 7112
		pll = intel_get_shared_dpll(intel_crtc);
3031 serge 7113
		if (pll == NULL) {
4104 Serge 7114
			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
5060 serge 7115
					 pipe_name(intel_crtc->pipe));
2342 Serge 7116
			return -EINVAL;
2327 Serge 7117
        }
3031 serge 7118
	} else
4104 Serge 7119
		intel_put_shared_dpll(intel_crtc);
2327 Serge 7120
 
5060 serge 7121
	if (is_lvds && has_reduced_clock && i915.powersave)
4104 Serge 7122
		intel_crtc->lowfreq_avail = true;
7123
	else
7124
		intel_crtc->lowfreq_avail = false;
2327 Serge 7125
 
5060 serge 7126
	return 0;
4104 Serge 7127
}
3243 Serge 7128
 
4560 Serge 7129
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
7130
					 struct intel_link_m_n *m_n)
4104 Serge 7131
{
7132
	struct drm_device *dev = crtc->base.dev;
7133
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 7134
	enum pipe pipe = crtc->pipe;
4104 Serge 7135
 
4560 Serge 7136
	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
7137
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
7138
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
7139
		& ~TU_SIZE_MASK;
7140
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
7141
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
7142
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7143
}
7144
 
7145
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
7146
					 enum transcoder transcoder,
7147
					 struct intel_link_m_n *m_n)
7148
{
7149
	struct drm_device *dev = crtc->base.dev;
7150
	struct drm_i915_private *dev_priv = dev->dev_private;
7151
	enum pipe pipe = crtc->pipe;
7152
 
7153
	if (INTEL_INFO(dev)->gen >= 5) {
7154
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
7155
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
7156
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
4104 Serge 7157
					& ~TU_SIZE_MASK;
4560 Serge 7158
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
7159
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
4104 Serge 7160
				   & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
4560 Serge 7161
	} else {
7162
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
7163
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
7164
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
7165
			& ~TU_SIZE_MASK;
7166
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
7167
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
7168
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7169
	}
3243 Serge 7170
}
7171
 
4560 Serge 7172
void intel_dp_get_m_n(struct intel_crtc *crtc,
7173
		      struct intel_crtc_config *pipe_config)
7174
{
7175
	if (crtc->config.has_pch_encoder)
7176
		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
7177
	else
7178
		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
7179
					     &pipe_config->dp_m_n);
7180
}
7181
 
7182
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
7183
					struct intel_crtc_config *pipe_config)
7184
{
7185
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
7186
				     &pipe_config->fdi_m_n);
7187
}
7188
 
4104 Serge 7189
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
7190
				     struct intel_crtc_config *pipe_config)
7191
{
7192
	struct drm_device *dev = crtc->base.dev;
7193
	struct drm_i915_private *dev_priv = dev->dev_private;
7194
	uint32_t tmp;
7195
 
7196
	tmp = I915_READ(PF_CTL(crtc->pipe));
7197
 
7198
	if (tmp & PF_ENABLE) {
7199
		pipe_config->pch_pfit.enabled = true;
7200
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
7201
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
7202
 
7203
		/* We currently do not free assignments of panel fitters on
7204
		 * ivb/hsw (since we don't use the higher upscaling modes which
7205
		 * differentiate them) so just WARN about this case for now. */
7206
		if (IS_GEN7(dev)) {
7207
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
7208
				PF_PIPE_SEL_IVB(crtc->pipe));
7209
		}
7210
	}
7211
}
7212
 
5060 serge 7213
static void ironlake_get_plane_config(struct intel_crtc *crtc,
7214
				      struct intel_plane_config *plane_config)
7215
{
7216
	struct drm_device *dev = crtc->base.dev;
7217
	struct drm_i915_private *dev_priv = dev->dev_private;
7218
	u32 val, base, offset;
7219
	int pipe = crtc->pipe, plane = crtc->plane;
7220
	int fourcc, pixel_format;
7221
	int aligned_height;
7222
 
7223
	crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
7224
	if (!crtc->base.primary->fb) {
7225
		DRM_DEBUG_KMS("failed to alloc fb\n");
7226
		return;
7227
	}
7228
 
7229
	val = I915_READ(DSPCNTR(plane));
7230
 
7231
	if (INTEL_INFO(dev)->gen >= 4)
7232
		if (val & DISPPLANE_TILED)
7233
			plane_config->tiled = true;
7234
 
7235
	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
7236
	fourcc = intel_format_to_fourcc(pixel_format);
7237
	crtc->base.primary->fb->pixel_format = fourcc;
7238
	crtc->base.primary->fb->bits_per_pixel =
7239
		drm_format_plane_cpp(fourcc, 0) * 8;
7240
 
7241
	base = I915_READ(DSPSURF(plane)) & 0xfffff000;
7242
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
7243
		offset = I915_READ(DSPOFFSET(plane));
7244
	} else {
7245
		if (plane_config->tiled)
7246
			offset = I915_READ(DSPTILEOFF(plane));
7247
		else
7248
			offset = I915_READ(DSPLINOFF(plane));
7249
	}
7250
	plane_config->base = base;
7251
 
7252
	val = I915_READ(PIPESRC(pipe));
7253
	crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
7254
	crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
7255
 
7256
	val = I915_READ(DSPSTRIDE(pipe));
7257
	crtc->base.primary->fb->pitches[0] = val & 0xffffff80;
7258
 
7259
	aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
7260
					    plane_config->tiled);
7261
 
7262
	plane_config->size = 16*1024*1024;
7263
 
7264
	DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
7265
		      pipe, plane, crtc->base.primary->fb->width,
7266
		      crtc->base.primary->fb->height,
7267
		      crtc->base.primary->fb->bits_per_pixel, base,
7268
		      crtc->base.primary->fb->pitches[0],
7269
		      plane_config->size);
7270
}
7271
 
3746 Serge 7272
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
7273
				     struct intel_crtc_config *pipe_config)
7274
{
7275
	struct drm_device *dev = crtc->base.dev;
7276
	struct drm_i915_private *dev_priv = dev->dev_private;
7277
	uint32_t tmp;
7278
 
5060 serge 7279
	if (!intel_display_power_enabled(dev_priv,
7280
					 POWER_DOMAIN_PIPE(crtc->pipe)))
7281
		return false;
7282
 
4104 Serge 7283
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
7284
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
7285
 
3746 Serge 7286
	tmp = I915_READ(PIPECONF(crtc->pipe));
7287
	if (!(tmp & PIPECONF_ENABLE))
7288
		return false;
7289
 
4280 Serge 7290
	switch (tmp & PIPECONF_BPC_MASK) {
7291
	case PIPECONF_6BPC:
7292
		pipe_config->pipe_bpp = 18;
7293
		break;
7294
	case PIPECONF_8BPC:
7295
		pipe_config->pipe_bpp = 24;
7296
		break;
7297
	case PIPECONF_10BPC:
7298
		pipe_config->pipe_bpp = 30;
7299
		break;
7300
	case PIPECONF_12BPC:
7301
		pipe_config->pipe_bpp = 36;
7302
		break;
7303
	default:
7304
		break;
7305
	}
7306
 
5060 serge 7307
	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
7308
		pipe_config->limited_color_range = true;
7309
 
4104 Serge 7310
	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
7311
		struct intel_shared_dpll *pll;
7312
 
3746 Serge 7313
		pipe_config->has_pch_encoder = true;
7314
 
4104 Serge 7315
		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
7316
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
7317
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
7318
 
7319
		ironlake_get_fdi_m_n_config(crtc, pipe_config);
7320
 
7321
		if (HAS_PCH_IBX(dev_priv->dev)) {
7322
			pipe_config->shared_dpll =
7323
				(enum intel_dpll_id) crtc->pipe;
7324
		} else {
7325
			tmp = I915_READ(PCH_DPLL_SEL);
7326
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
7327
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
7328
			else
7329
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
7330
		}
7331
 
7332
		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
7333
 
7334
		WARN_ON(!pll->get_hw_state(dev_priv, pll,
7335
					   &pipe_config->dpll_hw_state));
7336
 
7337
		tmp = pipe_config->dpll_hw_state.dpll;
7338
		pipe_config->pixel_multiplier =
7339
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
7340
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
4560 Serge 7341
 
7342
		ironlake_pch_clock_get(crtc, pipe_config);
4104 Serge 7343
	} else {
7344
		pipe_config->pixel_multiplier = 1;
7345
	}
7346
 
7347
	intel_get_pipe_timings(crtc, pipe_config);
7348
 
7349
	ironlake_get_pfit_config(crtc, pipe_config);
7350
 
3746 Serge 7351
	return true;
7352
}
7353
 
4104 Serge 7354
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
7355
{
7356
	struct drm_device *dev = dev_priv->dev;
7357
	struct intel_crtc *crtc;
7358
 
5060 serge 7359
	for_each_intel_crtc(dev, crtc)
4539 Serge 7360
		WARN(crtc->active, "CRTC for pipe %c enabled\n",
4104 Serge 7361
		     pipe_name(crtc->pipe));
7362
 
7363
	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
5060 serge 7364
	WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
7365
	WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
7366
	WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
4104 Serge 7367
	WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
7368
	WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
7369
	     "CPU PWM1 enabled\n");
5060 serge 7370
	if (IS_HASWELL(dev))
4104 Serge 7371
		WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
7372
		     "CPU PWM2 enabled\n");
7373
	WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
7374
	     "PCH PWM1 enabled\n");
7375
	WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
7376
	     "Utility pin enabled\n");
7377
	WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
7378
 
5060 serge 7379
	/*
7380
	 * In theory we can still leave IRQs enabled, as long as only the HPD
7381
	 * interrupts remain enabled. We used to check for that, but since it's
7382
	 * gen-specific and since we only disable LCPLL after we fully disable
7383
	 * the interrupts, the check below should be enough.
7384
	 */
7385
	WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
4104 Serge 7386
}
7387
 
5060 serge 7388
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
7389
{
7390
	struct drm_device *dev = dev_priv->dev;
7391
 
7392
	if (IS_HASWELL(dev))
7393
		return I915_READ(D_COMP_HSW);
7394
	else
7395
		return I915_READ(D_COMP_BDW);
7396
}
7397
 
7398
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
7399
{
7400
	struct drm_device *dev = dev_priv->dev;
7401
 
7402
	if (IS_HASWELL(dev)) {
7403
		mutex_lock(&dev_priv->rps.hw_lock);
7404
		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
7405
					    val))
7406
			DRM_ERROR("Failed to write to D_COMP\n");
7407
		mutex_unlock(&dev_priv->rps.hw_lock);
7408
	} else {
7409
		I915_WRITE(D_COMP_BDW, val);
7410
		POSTING_READ(D_COMP_BDW);
7411
	}
7412
}
7413
 
4104 Serge 7414
/*
7415
 * This function implements pieces of two sequences from BSpec:
7416
 * - Sequence for display software to disable LCPLL
7417
 * - Sequence for display software to allow package C8+
7418
 * The steps implemented here are just the steps that actually touch the LCPLL
7419
 * register. Callers should take care of disabling all the display engine
7420
 * functions, doing the mode unset, fixing interrupts, etc.
7421
 */
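/*
 * A minimal caller sketch, mirroring what hsw_enable_pc8()/hsw_disable_pc8()
 * below actually do (clock gating tweaks omitted):
 *
 *	lpt_disable_clkout_dp(dev);
 *	hsw_disable_lcpll(dev_priv, true, true);
 *	...
 *	hsw_restore_lcpll(dev_priv);
 *	lpt_init_pch_refclk(dev);
 */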
4560 Serge 7422
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
4104 Serge 7423
		       bool switch_to_fclk, bool allow_power_down)
7424
{
7425
	uint32_t val;
7426
 
7427
	assert_can_disable_lcpll(dev_priv);
7428
 
7429
	val = I915_READ(LCPLL_CTL);
7430
 
7431
	if (switch_to_fclk) {
7432
		val |= LCPLL_CD_SOURCE_FCLK;
7433
		I915_WRITE(LCPLL_CTL, val);
7434
 
7435
		if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
7436
				       LCPLL_CD_SOURCE_FCLK_DONE, 1))
7437
			DRM_ERROR("Switching to FCLK failed\n");
7438
 
7439
		val = I915_READ(LCPLL_CTL);
7440
	}
7441
 
7442
	val |= LCPLL_PLL_DISABLE;
7443
	I915_WRITE(LCPLL_CTL, val);
7444
	POSTING_READ(LCPLL_CTL);
7445
 
7446
	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
7447
		DRM_ERROR("LCPLL still locked\n");
7448
 
5060 serge 7449
	val = hsw_read_dcomp(dev_priv);
4104 Serge 7450
	val |= D_COMP_COMP_DISABLE;
5060 serge 7451
	hsw_write_dcomp(dev_priv, val);
7452
	ndelay(100);
4104 Serge 7453
 
5060 serge 7454
	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
7455
		     1))
4104 Serge 7456
		DRM_ERROR("D_COMP RCOMP still in progress\n");
7457
 
7458
	if (allow_power_down) {
7459
		val = I915_READ(LCPLL_CTL);
7460
		val |= LCPLL_POWER_DOWN_ALLOW;
7461
		I915_WRITE(LCPLL_CTL, val);
7462
		POSTING_READ(LCPLL_CTL);
7463
	}
7464
}
7465
 
7466
/*
7467
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
7468
 * source.
7469
 */
4560 Serge 7470
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
4104 Serge 7471
{
7472
	uint32_t val;
5060 serge 7473
	unsigned long irqflags;
4104 Serge 7474
 
7475
	val = I915_READ(LCPLL_CTL);
7476
 
7477
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
7478
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
7479
		return;
7480
 
5060 serge 7481
	/*
7482
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
7483
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
7484
	 *
7485
	 * The other problem is that hsw_restore_lcpll() is called as part of
7486
	 * the runtime PM resume sequence, so we can't just call
7487
	 * gen6_gt_force_wake_get() because that function calls
7488
	 * intel_runtime_pm_get(), and we can't change the runtime PM refcount
7489
	 * while we are on the resume sequence. So to solve this problem we have
7490
	 * to call special forcewake code that doesn't touch runtime PM and
7491
	 * doesn't enable the forcewake delayed work.
7492
	 */
7493
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
7494
	if (dev_priv->uncore.forcewake_count++ == 0)
7495
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
7496
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
4104 Serge 7497
 
7498
	if (val & LCPLL_POWER_DOWN_ALLOW) {
7499
		val &= ~LCPLL_POWER_DOWN_ALLOW;
7500
		I915_WRITE(LCPLL_CTL, val);
7501
		POSTING_READ(LCPLL_CTL);
7502
	}
7503
 
5060 serge 7504
	val = hsw_read_dcomp(dev_priv);
4104 Serge 7505
	val |= D_COMP_COMP_FORCE;
7506
	val &= ~D_COMP_COMP_DISABLE;
5060 serge 7507
	hsw_write_dcomp(dev_priv, val);
4104 Serge 7508
 
7509
	val = I915_READ(LCPLL_CTL);
7510
	val &= ~LCPLL_PLL_DISABLE;
7511
	I915_WRITE(LCPLL_CTL, val);
7512
 
7513
	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
7514
		DRM_ERROR("LCPLL not locked yet\n");
7515
 
7516
	if (val & LCPLL_CD_SOURCE_FCLK) {
7517
		val = I915_READ(LCPLL_CTL);
7518
		val &= ~LCPLL_CD_SOURCE_FCLK;
7519
		I915_WRITE(LCPLL_CTL, val);
7520
 
7521
		if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
7522
					LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
7523
			DRM_ERROR("Switching back to LCPLL failed\n");
7524
	}
7525
 
5060 serge 7526
	/* See the big comment above. */
7527
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
7528
	if (--dev_priv->uncore.forcewake_count == 0)
7529
		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
7530
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
4104 Serge 7531
}
7532
 
5060 serge 7533
/*
7534
 * Package states C8 and deeper are really deep PC states that can only be
7535
 * reached when all the devices on the system allow it, so even if the graphics
7536
 * device allows PC8+, it doesn't mean the system will actually get to these
7537
 * states. Our driver only allows PC8+ when going into runtime PM.
7538
 *
7539
 * The requirements for PC8+ are that all the outputs are disabled, the power
7540
 * well is disabled and most interrupts are disabled, and these are also
7541
 * requirements for runtime PM. When these conditions are met, we manually do
7542
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
7543
 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
7544
 * hang the machine.
7545
 *
7546
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
7547
 * the state of some registers, so when we come back from PC8+ we need to
7548
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
7549
 * need to take care of the registers kept by RC6. Notice that this happens even
7550
 * if we don't put the device in PCI D3 state (which is what currently happens
7551
 * because of the runtime PM support).
7552
 *
7553
 * For more, read "Display Sequences for Package C8" on the hardware
7554
 * documentation.
7555
 */
7556
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
4104 Serge 7557
{
7558
	struct drm_device *dev = dev_priv->dev;
7559
	uint32_t val;
7560
 
7561
	DRM_DEBUG_KMS("Enabling package C8+\n");
7562
 
7563
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
7564
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
7565
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
7566
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
7567
	}
7568
 
7569
	lpt_disable_clkout_dp(dev);
7570
	hsw_disable_lcpll(dev_priv, true, true);
7571
}
7572
 
5060 serge 7573
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
4104 Serge 7574
{
7575
	struct drm_device *dev = dev_priv->dev;
7576
	uint32_t val;
7577
 
7578
	DRM_DEBUG_KMS("Disabling package C8+\n");
7579
 
7580
	hsw_restore_lcpll(dev_priv);
7581
	lpt_init_pch_refclk(dev);
7582
 
7583
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
7584
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
7585
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
7586
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
7587
	}
7588
 
7589
	intel_prepare_ddi(dev);
7590
}
7591
 
5060 serge 7592
static void snb_modeset_global_resources(struct drm_device *dev)
4104 Serge 7593
{
5060 serge 7594
	modeset_update_crtc_power_domains(dev);
4104 Serge 7595
}
7596
 
5060 serge 7597
static void haswell_modeset_global_resources(struct drm_device *dev)
4104 Serge 7598
{
5060 serge 7599
	modeset_update_crtc_power_domains(dev);
4104 Serge 7600
}
7601
 
5060 serge 7602
static int haswell_crtc_mode_set(struct drm_crtc *crtc,
7603
				 int x, int y,
7604
				 struct drm_framebuffer *fb)
4104 Serge 7605
{
5060 serge 7606
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4104 Serge 7607
 
5060 serge 7608
	if (!intel_ddi_pll_select(intel_crtc))
7609
		return -EINVAL;
4104 Serge 7610
 
5060 serge 7611
	intel_crtc->lowfreq_avail = false;
4104 Serge 7612
 
5060 serge 7613
	return 0;
4104 Serge 7614
}
7615
 
5060 serge 7616
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
7617
				       struct intel_crtc_config *pipe_config)
4104 Serge 7618
{
5060 serge 7619
	struct drm_device *dev = crtc->base.dev;
4104 Serge 7620
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 7621
	struct intel_shared_dpll *pll;
7622
	enum port port;
7623
	uint32_t tmp;
4104 Serge 7624
 
5060 serge 7625
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
4560 Serge 7626
 
5060 serge 7627
	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
4104 Serge 7628
 
5060 serge 7629
	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
4104 Serge 7630
 
5060 serge 7631
	switch (pipe_config->ddi_pll_sel) {
7632
	case PORT_CLK_SEL_WRPLL1:
7633
		pipe_config->shared_dpll = DPLL_ID_WRPLL1;
7634
		break;
7635
	case PORT_CLK_SEL_WRPLL2:
7636
		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
7637
		break;
4104 Serge 7638
	}
7639
 
5060 serge 7640
	if (pipe_config->shared_dpll >= 0) {
7641
		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
4560 Serge 7642
 
5060 serge 7643
		WARN_ON(!pll->get_hw_state(dev_priv, pll,
7644
					   &pipe_config->dpll_hw_state));
4104 Serge 7645
	}
7646
 
4560 Serge 7647
	/*
5060 serge 7648
	 * Haswell has only one FDI/PCH transcoder A, which is connected to
7649
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
7650
	 * the PCH transcoder is on.
4560 Serge 7651
	 */
5060 serge 7652
	if ((port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
7653
		pipe_config->has_pch_encoder = true;
4560 Serge 7654
 
5060 serge 7655
		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
7656
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
7657
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
3480 Serge 7658
 
5060 serge 7659
		ironlake_get_fdi_m_n_config(crtc, pipe_config);
3480 Serge 7660
	}
4560 Serge 7661
}
7662
 
3746 Serge 7663
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
7664
				    struct intel_crtc_config *pipe_config)
7665
{
7666
	struct drm_device *dev = crtc->base.dev;
7667
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 7668
	enum intel_display_power_domain pfit_domain;
3746 Serge 7669
	uint32_t tmp;
7670
 
5060 serge 7671
	if (!intel_display_power_enabled(dev_priv,
7672
					 POWER_DOMAIN_PIPE(crtc->pipe)))
7673
		return false;
7674
 
4104 Serge 7675
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
7676
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
7677
 
7678
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
7679
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
7680
		enum pipe trans_edp_pipe;
7681
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
7682
		default:
7683
			WARN(1, "unknown pipe linked to edp transcoder\n");
7684
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
7685
		case TRANS_DDI_EDP_INPUT_A_ON:
7686
			trans_edp_pipe = PIPE_A;
7687
			break;
7688
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
7689
			trans_edp_pipe = PIPE_B;
7690
			break;
7691
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
7692
			trans_edp_pipe = PIPE_C;
7693
			break;
7694
		}
7695
 
7696
		if (trans_edp_pipe == crtc->pipe)
7697
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
7698
	}
7699
 
5060 serge 7700
	if (!intel_display_power_enabled(dev_priv,
4104 Serge 7701
			POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
7702
		return false;
7703
 
7704
	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
3746 Serge 7705
	if (!(tmp & PIPECONF_ENABLE))
7706
		return false;
7707
 
5060 serge 7708
	haswell_get_ddi_port_state(crtc, pipe_config);
3746 Serge 7709
 
4104 Serge 7710
	intel_get_pipe_timings(crtc, pipe_config);
7711
 
7712
	pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
5060 serge 7713
	if (intel_display_power_enabled(dev_priv, pfit_domain))
4104 Serge 7714
		ironlake_get_pfit_config(crtc, pipe_config);
7715
 
4560 Serge 7716
	if (IS_HASWELL(dev))
4104 Serge 7717
		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
7718
					   (I915_READ(IPS_CTL) & IPS_ENABLE);
7719
 
7720
	pipe_config->pixel_multiplier = 1;
7721
 
3746 Serge 7722
	return true;
7723
}
7724
 
4560 Serge 7725
static struct {
7726
	int clock;
7727
	u32 config;
7728
} hdmi_audio_clock[] = {
7729
	{ DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
7730
	{ 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
7731
	{ 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
7732
	{ 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
7733
	{ 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
7734
	{ 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
7735
	{ DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
7736
	{ 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
7737
	{ DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
7738
	{ 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
7739
};
7740
 
7741
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
7742
static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
7743
{
7744
	int i;
7745
 
7746
	for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
7747
		if (mode->clock == hdmi_audio_clock[i].clock)
7748
			break;
7749
	}
7750
 
7751
	if (i == ARRAY_SIZE(hdmi_audio_clock)) {
7752
		DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
7753
		i = 1;
7754
	}
7755
 
7756
	DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
7757
		      hdmi_audio_clock[i].clock,
7758
		      hdmi_audio_clock[i].config);
7759
 
7760
	return hdmi_audio_clock[i].config;
7761
}
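
/*
 * Example of the lookup above, using only values from the table: a mode
 * with mode->clock == 74176 matches the DIV_ROUND_UP(74250 * 1000, 1001)
 * entry and returns AUD_CONFIG_PIXEL_CLOCK_HDMI_74176, while a clock the
 * table does not list (say 75000) falls back to index 1, i.e. the 25.2 MHz
 * default noted per bspec.
 */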
7762
 
2342 Serge 7763
static bool intel_eld_uptodate(struct drm_connector *connector,
7764
			       int reg_eldv, uint32_t bits_eldv,
7765
			       int reg_elda, uint32_t bits_elda,
7766
			       int reg_edid)
7767
{
7768
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
7769
	uint8_t *eld = connector->eld;
7770
	uint32_t i;
7771
 
7772
	i = I915_READ(reg_eldv);
7773
	i &= bits_eldv;
7774
 
7775
	if (!eld[0])
7776
		return !i;
7777
 
7778
	if (!i)
7779
		return false;
7780
 
7781
	i = I915_READ(reg_elda);
7782
	i &= ~bits_elda;
7783
	I915_WRITE(reg_elda, i);
7784
 
7785
	for (i = 0; i < eld[2]; i++)
7786
		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
7787
			return false;
7788
 
7789
	return true;
7790
}
7791
 
7792
static void g4x_write_eld(struct drm_connector *connector,
4560 Serge 7793
			  struct drm_crtc *crtc,
7794
			  struct drm_display_mode *mode)
2342 Serge 7795
{
7796
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
7797
	uint8_t *eld = connector->eld;
7798
	uint32_t eldv;
7799
	uint32_t len;
7800
	uint32_t i;
7801
 
7802
	i = I915_READ(G4X_AUD_VID_DID);
7803
 
7804
	if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
7805
		eldv = G4X_ELDV_DEVCL_DEVBLC;
7806
	else
7807
		eldv = G4X_ELDV_DEVCTG;
7808
 
7809
	if (intel_eld_uptodate(connector,
7810
			       G4X_AUD_CNTL_ST, eldv,
7811
			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
7812
			       G4X_HDMIW_HDMIEDID))
7813
		return;
7814
 
7815
	i = I915_READ(G4X_AUD_CNTL_ST);
7816
	i &= ~(eldv | G4X_ELD_ADDR);
7817
	len = (i >> 9) & 0x1f;		/* ELD buffer size */
7818
	I915_WRITE(G4X_AUD_CNTL_ST, i);
7819
 
7820
	if (!eld[0])
7821
		return;
7822
 
7823
	len = min_t(uint8_t, eld[2], len);
7824
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
7825
	for (i = 0; i < len; i++)
7826
		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
7827
 
7828
	i = I915_READ(G4X_AUD_CNTL_ST);
7829
	i |= eldv;
7830
	I915_WRITE(G4X_AUD_CNTL_ST, i);
7831
}
7832
 
3031 serge 7833
static void haswell_write_eld(struct drm_connector *connector,
4560 Serge 7834
			      struct drm_crtc *crtc,
7835
			      struct drm_display_mode *mode)
3031 serge 7836
{
7837
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
7838
	uint8_t *eld = connector->eld;
7839
	uint32_t eldv;
7840
	uint32_t i;
7841
	int len;
7842
	int pipe = to_intel_crtc(crtc)->pipe;
7843
	int tmp;
7844
 
7845
	int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
7846
	int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
7847
	int aud_config = HSW_AUD_CFG(pipe);
7848
	int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
7849
 
7850
	/* Audio output enable */
7851
	DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
7852
	tmp = I915_READ(aud_cntrl_st2);
7853
	tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
7854
	I915_WRITE(aud_cntrl_st2, tmp);
5060 serge 7855
	POSTING_READ(aud_cntrl_st2);
3031 serge 7856
 
5060 serge 7857
	assert_pipe_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
3031 serge 7858
 
7859
	/* Set ELD valid state */
7860
	tmp = I915_READ(aud_cntrl_st2);
4104 Serge 7861
	DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
3031 serge 7862
	tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
7863
	I915_WRITE(aud_cntrl_st2, tmp);
7864
	tmp = I915_READ(aud_cntrl_st2);
4104 Serge 7865
	DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);
3031 serge 7866
 
7867
	/* Enable HDMI mode */
7868
	tmp = I915_READ(aud_config);
4104 Serge 7869
	DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
3031 serge 7870
	/* clear N_programing_enable and N_value_index */
7871
	tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
7872
	I915_WRITE(aud_config, tmp);
7873
 
7874
	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
7875
 
7876
	eldv = AUDIO_ELD_VALID_A << (pipe * 4);
7877
 
7878
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
7879
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
7880
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
7881
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
4560 Serge 7882
	} else {
7883
		I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
7884
	}
3031 serge 7885
 
7886
	if (intel_eld_uptodate(connector,
7887
			       aud_cntrl_st2, eldv,
7888
			       aud_cntl_st, IBX_ELD_ADDRESS,
7889
			       hdmiw_hdmiedid))
7890
		return;
7891
 
7892
	i = I915_READ(aud_cntrl_st2);
7893
	i &= ~eldv;
7894
	I915_WRITE(aud_cntrl_st2, i);
7895
 
7896
	if (!eld[0])
7897
		return;
7898
 
7899
	i = I915_READ(aud_cntl_st);
7900
	i &= ~IBX_ELD_ADDRESS;
7901
	I915_WRITE(aud_cntl_st, i);
7902
	i = (i >> 29) & DIP_PORT_SEL_MASK;		/* DIP_Port_Select, 0x1 = PortB */
7903
	DRM_DEBUG_DRIVER("port num:%d\n", i);
7904
 
7905
	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
7906
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
7907
	for (i = 0; i < len; i++)
7908
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
7909
 
7910
	i = I915_READ(aud_cntrl_st2);
7911
	i |= eldv;
7912
	I915_WRITE(aud_cntrl_st2, i);
7913
 
7914
}
7915
 
2342 Serge 7916
static void ironlake_write_eld(struct drm_connector *connector,
4560 Serge 7917
			       struct drm_crtc *crtc,
7918
			       struct drm_display_mode *mode)
2342 Serge 7919
{
7920
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
7921
	uint8_t *eld = connector->eld;
7922
	uint32_t eldv;
7923
	uint32_t i;
7924
	int len;
7925
	int hdmiw_hdmiedid;
3031 serge 7926
	int aud_config;
2342 Serge 7927
	int aud_cntl_st;
7928
	int aud_cntrl_st2;
3031 serge 7929
	int pipe = to_intel_crtc(crtc)->pipe;
2342 Serge 7930
 
7931
	if (HAS_PCH_IBX(connector->dev)) {
3031 serge 7932
		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
7933
		aud_config = IBX_AUD_CFG(pipe);
7934
		aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
2342 Serge 7935
		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
4560 Serge 7936
	} else if (IS_VALLEYVIEW(connector->dev)) {
7937
		hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
7938
		aud_config = VLV_AUD_CFG(pipe);
7939
		aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
7940
		aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
2342 Serge 7941
	} else {
3031 serge 7942
		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
7943
		aud_config = CPT_AUD_CFG(pipe);
7944
		aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
2342 Serge 7945
		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
7946
	}
7947
 
3031 serge 7948
	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
2342 Serge 7949
 
4560 Serge 7950
	if (IS_VALLEYVIEW(connector->dev))  {
7951
		struct intel_encoder *intel_encoder;
7952
		struct intel_digital_port *intel_dig_port;
7953
 
7954
		intel_encoder = intel_attached_encoder(connector);
7955
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
7956
		i = intel_dig_port->port;
7957
	} else {
2342 Serge 7958
		i = I915_READ(aud_cntl_st);
4560 Serge 7959
		i = (i >> 29) & DIP_PORT_SEL_MASK;
7960
		/* DIP_Port_Select, 0x1 = PortB */
7961
	}
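	/* Each digital port (B/C/D) owns a 4-bit field in AUD_CNTL_ST2, so
	 * shifting IBX_ELD_VALIDB by (port - 1) * 4 picks the matching
	 * ELD-valid bit.  If the port cannot be determined, the valid bit is
	 * set for every port rather than guessing a single one. */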
7962
 
2342 Serge 7963
	if (!i) {
7964
		DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
7965
		/* operate blindly on all ports */
7966
		eldv = IBX_ELD_VALIDB;
7967
		eldv |= IBX_ELD_VALIDB << 4;
7968
		eldv |= IBX_ELD_VALIDB << 8;
7969
	} else {
4104 Serge 7970
		DRM_DEBUG_DRIVER("ELD on port %c\n", port_name(i));
2342 Serge 7971
		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
7972
	}
7973
 
7974
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
7975
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
7976
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
3031 serge 7977
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
4560 Serge 7978
	} else {
7979
		I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
7980
	}
2342 Serge 7981
 
7982
	if (intel_eld_uptodate(connector,
7983
			       aud_cntrl_st2, eldv,
7984
			       aud_cntl_st, IBX_ELD_ADDRESS,
7985
			       hdmiw_hdmiedid))
7986
		return;
7987
 
7988
	i = I915_READ(aud_cntrl_st2);
7989
	i &= ~eldv;
7990
	I915_WRITE(aud_cntrl_st2, i);
7991
 
7992
	if (!eld[0])
7993
		return;
7994
 
7995
	i = I915_READ(aud_cntl_st);
7996
	i &= ~IBX_ELD_ADDRESS;
7997
	I915_WRITE(aud_cntl_st, i);
7998
 
7999
	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
8000
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
8001
	for (i = 0; i < len; i++)
8002
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
8003
 
8004
	i = I915_READ(aud_cntrl_st2);
8005
	i |= eldv;
8006
	I915_WRITE(aud_cntrl_st2, i);
8007
}
8008
 
8009
void intel_write_eld(struct drm_encoder *encoder,
8010
		     struct drm_display_mode *mode)
8011
{
8012
	struct drm_crtc *crtc = encoder->crtc;
8013
	struct drm_connector *connector;
8014
	struct drm_device *dev = encoder->dev;
8015
	struct drm_i915_private *dev_priv = dev->dev_private;
8016
 
8017
	connector = drm_select_eld(encoder, mode);
8018
	if (!connector)
8019
		return;
8020
 
8021
	DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
8022
			 connector->base.id,
5060 serge 8023
			 connector->name,
2342 Serge 8024
			 connector->encoder->base.id,
5060 serge 8025
			 connector->encoder->name);
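	/* ELD byte 6 is the audio latency field, expressed in 2 ms units,
	 * hence the halving of the millisecond value from drm_av_sync_delay(). */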
2342 Serge 8026
 
8027
	connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
8028
 
8029
	if (dev_priv->display.write_eld)
4560 Serge 8030
		dev_priv->display.write_eld(connector, crtc, mode);
2342 Serge 8031
}
8032
 
3031 serge 8033
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
8034
{
8035
	struct drm_device *dev = crtc->dev;
8036
	struct drm_i915_private *dev_priv = dev->dev_private;
8037
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5060 serge 8038
	uint32_t cntl;
2327 Serge 8039
 
5060 serge 8040
	if (base != intel_crtc->cursor_base) {
3031 serge 8041
		/* On these chipsets we can only modify the base whilst
8042
		 * the cursor is disabled.
8043
		 */
5060 serge 8044
		if (intel_crtc->cursor_cntl) {
8045
			I915_WRITE(_CURACNTR, 0);
8046
			POSTING_READ(_CURACNTR);
8047
			intel_crtc->cursor_cntl = 0;
8048
		}
8049
 
3031 serge 8050
		I915_WRITE(_CURABASE, base);
5060 serge 8051
		POSTING_READ(_CURABASE);
8052
	}
2327 Serge 8053
 
3031 serge 8054
	/* XXX width must be 64, stride 256 => 0x00 << 28 */
5060 serge 8055
	cntl = 0;
8056
	if (base)
8057
		cntl = (CURSOR_ENABLE |
3031 serge 8058
			CURSOR_GAMMA_ENABLE |
5060 serge 8059
			CURSOR_FORMAT_ARGB);
8060
	if (intel_crtc->cursor_cntl != cntl) {
3031 serge 8061
		I915_WRITE(_CURACNTR, cntl);
5060 serge 8062
		POSTING_READ(_CURACNTR);
8063
		intel_crtc->cursor_cntl = cntl;
8064
	}
3031 serge 8065
}
2327 Serge 8066
 
3031 serge 8067
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
8068
{
8069
	struct drm_device *dev = crtc->dev;
8070
	struct drm_i915_private *dev_priv = dev->dev_private;
8071
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8072
	int pipe = intel_crtc->pipe;
5060 serge 8073
	uint32_t cntl;
2327 Serge 8074
 
5060 serge 8075
	cntl = 0;
3031 serge 8076
	if (base) {
5060 serge 8077
		cntl = MCURSOR_GAMMA_ENABLE;
8078
		switch (intel_crtc->cursor_width) {
8079
			case 64:
8080
				cntl |= CURSOR_MODE_64_ARGB_AX;
8081
				break;
8082
			case 128:
8083
				cntl |= CURSOR_MODE_128_ARGB_AX;
8084
				break;
8085
			case 256:
8086
				cntl |= CURSOR_MODE_256_ARGB_AX;
8087
				break;
8088
			default:
8089
				WARN_ON(1);
8090
				return;
8091
		}
3031 serge 8092
		cntl |= pipe << 28; /* Connect to correct pipe */
8093
	}
5060 serge 8094
	if (intel_crtc->cursor_cntl != cntl) {
3031 serge 8095
		I915_WRITE(CURCNTR(pipe), cntl);
5060 serge 8096
		POSTING_READ(CURCNTR(pipe));
8097
		intel_crtc->cursor_cntl = cntl;
8098
	}
2327 Serge 8099
 
3031 serge 8100
	/* and commit changes on next vblank */
8101
	I915_WRITE(CURBASE(pipe), base);
4371 Serge 8102
	POSTING_READ(CURBASE(pipe));
3031 serge 8103
}
2327 Serge 8104
 
3031 serge 8105
static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
8106
{
8107
	struct drm_device *dev = crtc->dev;
8108
	struct drm_i915_private *dev_priv = dev->dev_private;
8109
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8110
	int pipe = intel_crtc->pipe;
5060 serge 8111
	uint32_t cntl;
2327 Serge 8112
 
5060 serge 8113
	cntl = 0;
3031 serge 8114
	if (base) {
5060 serge 8115
		cntl = MCURSOR_GAMMA_ENABLE;
8116
		switch (intel_crtc->cursor_width) {
8117
			case 64:
8118
				cntl |= CURSOR_MODE_64_ARGB_AX;
8119
				break;
8120
			case 128:
8121
				cntl |= CURSOR_MODE_128_ARGB_AX;
8122
				break;
8123
			case 256:
8124
				cntl |= CURSOR_MODE_256_ARGB_AX;
8125
				break;
8126
			default:
8127
				WARN_ON(1);
8128
				return;
8129
		}
3031 serge 8130
	}
5060 serge 8131
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3480 Serge 8132
		cntl |= CURSOR_PIPE_CSC_ENABLE;
5060 serge 8133
 
8134
	if (intel_crtc->cursor_cntl != cntl) {
8135
		I915_WRITE(CURCNTR(pipe), cntl);
8136
		POSTING_READ(CURCNTR(pipe));
8137
		intel_crtc->cursor_cntl = cntl;
4104 Serge 8138
	}
2327 Serge 8139
 
3031 serge 8140
	/* and commit changes on next vblank */
5060 serge 8141
	I915_WRITE(CURBASE(pipe), base);
8142
	POSTING_READ(CURBASE(pipe));
3031 serge 8143
}
2327 Serge 8144
 
3031 serge 8145
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
5060 serge 8146
void intel_crtc_update_cursor(struct drm_crtc *crtc,
3031 serge 8147
				     bool on)
8148
{
8149
	struct drm_device *dev = crtc->dev;
8150
	struct drm_i915_private *dev_priv = dev->dev_private;
8151
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8152
	int pipe = intel_crtc->pipe;
5060 serge 8153
	int x = crtc->cursor_x;
8154
	int y = crtc->cursor_y;
4560 Serge 8155
	u32 base = 0, pos = 0;
2327 Serge 8156
 
4560 Serge 8157
	if (on)
8158
		base = intel_crtc->cursor_addr;
2327 Serge 8159
 
4560 Serge 8160
	if (x >= intel_crtc->config.pipe_src_w)
3031 serge 8161
		base = 0;
2327 Serge 8162
 
4560 Serge 8163
	if (y >= intel_crtc->config.pipe_src_h)
3031 serge 8164
		base = 0;
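	/* CURPOS uses sign-magnitude encoding: negative coordinates are
	 * written as their absolute value with the per-axis sign bit set,
	 * which lets the cursor hang partially off the top/left edge. */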
2327 Serge 8165
 
3031 serge 8166
	if (x < 0) {
4560 Serge 8167
		if (x + intel_crtc->cursor_width <= 0)
3031 serge 8168
			base = 0;
2327 Serge 8169
 
3031 serge 8170
		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
8171
		x = -x;
8172
	}
8173
	pos |= x << CURSOR_X_SHIFT;
2327 Serge 8174
 
3031 serge 8175
	if (y < 0) {
4560 Serge 8176
		if (y + intel_crtc->cursor_height <= 0)
3031 serge 8177
			base = 0;
2327 Serge 8178
 
3031 serge 8179
		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
8180
		y = -y;
8181
	}
8182
	pos |= y << CURSOR_Y_SHIFT;
2327 Serge 8183
 
5060 serge 8184
	if (base == 0 && intel_crtc->cursor_base == 0)
3031 serge 8185
		return;
2327 Serge 8186
 
5060 serge 8187
	I915_WRITE(CURPOS(pipe), pos);
8188
 
8189
	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
3031 serge 8190
		ivb_update_cursor(crtc, base);
5060 serge 8191
	else if (IS_845G(dev) || IS_I865G(dev))
8192
		i845_update_cursor(crtc, base);
8193
	else
4560 Serge 8194
		i9xx_update_cursor(crtc, base);
5060 serge 8195
	intel_crtc->cursor_base = base;
3031 serge 8196
}
2327 Serge 8197
 
5060 serge 8198
/*
8199
 * intel_crtc_cursor_set_obj - Set cursor to specified GEM object
8200
 *
8201
 * Note that the object's reference will be consumed if the update fails.  If
8202
 * the update succeeds, the reference of the old object (if any) will be
8203
 * consumed.
8204
 */
8205
static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
8206
				     struct drm_i915_gem_object *obj,
3031 serge 8207
				     uint32_t width, uint32_t height)
8208
{
8209
	struct drm_device *dev = crtc->dev;
8210
	struct drm_i915_private *dev_priv = dev->dev_private;
8211
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5060 serge 8212
	enum pipe pipe = intel_crtc->pipe;
8213
	unsigned old_width;
3031 serge 8214
	uint32_t addr;
8215
	int ret;
2327 Serge 8216
 
3031 serge 8217
	/* if we want to turn off the cursor ignore width and height */
5060 serge 8218
	if (!obj) {
3031 serge 8219
		DRM_DEBUG_KMS("cursor off\n");
8220
		addr = 0;
8221
		obj = NULL;
8222
		mutex_lock(&dev->struct_mutex);
8223
		goto finish;
8224
	}
2327 Serge 8225
 
5060 serge 8226
	/* Check for which cursor types we support */
8227
	if (!((width == 64 && height == 64) ||
8228
			(width == 128 && height == 128 && !IS_GEN2(dev)) ||
8229
			(width == 256 && height == 256 && !IS_GEN2(dev)))) {
8230
		DRM_DEBUG("Cursor dimension not supported\n");
3031 serge 8231
		return -EINVAL;
8232
	}
2327 Serge 8233
 
3031 serge 8234
	if (obj->base.size < width * height * 4) {
5060 serge 8235
		DRM_DEBUG_KMS("buffer is too small\n");
3031 serge 8236
		ret = -ENOMEM;
8237
		goto fail;
8238
	}
2327 Serge 8239
 
3031 serge 8240
	/* we only need to pin inside GTT if cursor is non-phy */
8241
	mutex_lock(&dev->struct_mutex);
5060 serge 8242
	if (!INTEL_INFO(dev)->cursor_needs_physical) {
3746 Serge 8243
		unsigned alignment;
8244
 
3031 serge 8245
		if (obj->tiling_mode) {
5060 serge 8246
			DRM_DEBUG_KMS("cursor cannot be tiled\n");
3031 serge 8247
			ret = -EINVAL;
8248
			goto fail_locked;
8249
		}
2327 Serge 8250
 
5097 serge 8251
		/*
8252
		 * Global gtt pte registers are special registers which actually
8253
		 * forward writes to a chunk of system memory. Which means that
8254
		 * there is no risk that the register values disappear as soon
8255
		 * as we call intel_runtime_pm_put(), so it is correct to wrap
8256
		 * only the pin/unpin/fence and not more.
8257
		 */
8258
		intel_runtime_pm_get(dev_priv);
8259
 
3746 Serge 8260
		/* Note that the w/a also requires 2 PTE of padding following
8261
		 * the bo. We currently fill all unused PTE with the shadow
8262
		 * page and so we should always have valid PTE following the
8263
		 * cursor preventing the VT-d warning.
8264
		 */
8265
		alignment = 0;
8266
		if (need_vtd_wa(dev))
8267
			alignment = 64*1024;
8268
 
8269
		ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
3031 serge 8270
		if (ret) {
5060 serge 8271
			DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n");
5097 serge 8272
			intel_runtime_pm_put(dev_priv);
3031 serge 8273
			goto fail_locked;
8274
		}
2327 Serge 8275
 
3031 serge 8276
		ret = i915_gem_object_put_fence(obj);
8277
		if (ret) {
5060 serge 8278
			DRM_DEBUG_KMS("failed to release fence for cursor");
5097 serge 8279
			intel_runtime_pm_put(dev_priv);
3031 serge 8280
			goto fail_unpin;
8281
		}
2327 Serge 8282
 
4104 Serge 8283
		addr = i915_gem_obj_ggtt_offset(obj);
5097 serge 8284
 
8285
		intel_runtime_pm_put(dev_priv);
3031 serge 8286
	} else {
8287
		int align = IS_I830(dev) ? 16 * 1024 : 256;
5060 serge 8288
//		ret = i915_gem_object_attach_phys(obj, align);
8289
//		if (ret) {
8290
//			DRM_DEBUG_KMS("failed to attach phys object\n");
8291
//			goto fail_locked;
8292
//		}
8293
//		addr = obj->phys_handle->busaddr;
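		/* The physical-object cursor path is stubbed out in this port,
		 * so addr is left unset on chipsets that need a physical
		 * cursor; such hardware is effectively not handled here. */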
3031 serge 8294
	}
2327 Serge 8295
 
3031 serge 8296
	if (IS_GEN2(dev))
8297
		I915_WRITE(CURSIZE, (height << 12) | width);
2327 Serge 8298
 
3031 serge 8299
 finish:
8300
	if (intel_crtc->cursor_bo) {
5060 serge 8301
		if (!INTEL_INFO(dev)->cursor_needs_physical)
4104 Serge 8302
			i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
3031 serge 8303
	}
2327 Serge 8304
 
5060 serge 8305
	i915_gem_track_fb(intel_crtc->cursor_bo, obj,
8306
			  INTEL_FRONTBUFFER_CURSOR(pipe));
3031 serge 8307
	mutex_unlock(&dev->struct_mutex);
2327 Serge 8308
 
5060 serge 8309
	old_width = intel_crtc->cursor_width;
8310
 
3031 serge 8311
	intel_crtc->cursor_addr = addr;
8312
	intel_crtc->cursor_bo = obj;
8313
	intel_crtc->cursor_width = width;
8314
	intel_crtc->cursor_height = height;
2327 Serge 8315
 
5060 serge 8316
	if (intel_crtc->active) {
8317
		if (old_width != width)
8318
			intel_update_watermarks(crtc);
4104 Serge 8319
		intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
5060 serge 8320
	}
2327 Serge 8321
 
3031 serge 8322
	return 0;
8323
fail_unpin:
4104 Serge 8324
	i915_gem_object_unpin_from_display_plane(obj);
3031 serge 8325
fail_locked:
8326
	mutex_unlock(&dev->struct_mutex);
8327
fail:
8328
	drm_gem_object_unreference_unlocked(&obj->base);
8329
	return ret;
8330
}
2327 Serge 8331
 
2330 Serge 8332
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
8333
				 u16 *blue, uint32_t start, uint32_t size)
8334
{
8335
	int end = (start + size > 256) ? 256 : start + size, i;
8336
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2327 Serge 8337
 
2330 Serge 8338
	for (i = start; i < end; i++) {
8339
		intel_crtc->lut_r[i] = red[i] >> 8;
8340
		intel_crtc->lut_g[i] = green[i] >> 8;
8341
		intel_crtc->lut_b[i] = blue[i] >> 8;
8342
	}
2327 Serge 8343
 
2330 Serge 8344
	intel_crtc_load_lut(crtc);
8345
}
2327 Serge 8346
 
2330 Serge 8347
/* VESA 640x480x72Hz mode to set on the pipe */
8348
static struct drm_display_mode load_detect_mode = {
8349
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
8350
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
8351
};
2327 Serge 8352
 
4560 Serge 8353
struct drm_framebuffer *
5060 serge 8354
__intel_framebuffer_create(struct drm_device *dev,
3031 serge 8355
			 struct drm_mode_fb_cmd2 *mode_cmd,
8356
			 struct drm_i915_gem_object *obj)
8357
{
8358
	struct intel_framebuffer *intel_fb;
8359
	int ret;
2327 Serge 8360
 
3031 serge 8361
	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8362
	if (!intel_fb) {
8363
		drm_gem_object_unreference_unlocked(&obj->base);
8364
		return ERR_PTR(-ENOMEM);
8365
	}
2327 Serge 8366
 
3031 serge 8367
	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
4560 Serge 8368
	if (ret)
8369
		goto err;
8370
 
8371
	return &intel_fb->base;
8372
err:
3031 serge 8373
	drm_gem_object_unreference_unlocked(&obj->base);
8374
	kfree(intel_fb);
4560 Serge 8375
 
3031 serge 8376
	return ERR_PTR(ret);
8377
}
2327 Serge 8378
 
5060 serge 8379
static struct drm_framebuffer *
8380
intel_framebuffer_create(struct drm_device *dev,
8381
			 struct drm_mode_fb_cmd2 *mode_cmd,
8382
			 struct drm_i915_gem_object *obj)
8383
{
8384
	struct drm_framebuffer *fb;
8385
	int ret;
8386
 
8387
	ret = i915_mutex_lock_interruptible(dev);
8388
	if (ret)
8389
		return ERR_PTR(ret);
8390
	fb = __intel_framebuffer_create(dev, mode_cmd, obj);
8391
	mutex_unlock(&dev->struct_mutex);
8392
 
8393
	return fb;
8394
}
8395
 
2330 Serge 8396
static u32
8397
intel_framebuffer_pitch_for_width(int width, int bpp)
8398
{
8399
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
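	/* For example, 1024 px at 32 bpp gives 4096 bytes (already a multiple
	 * of 64), while 1366 px at 32 bpp gives 5464 bytes, aligned up to 5504. */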
8400
	return ALIGN(pitch, 64);
8401
}
2327 Serge 8402
 
2330 Serge 8403
static u32
8404
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
8405
{
8406
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
5060 serge 8407
	return PAGE_ALIGN(pitch * mode->vdisplay);
2330 Serge 8408
}
2327 Serge 8409
 
2330 Serge 8410
static struct drm_framebuffer *
8411
intel_framebuffer_create_for_mode(struct drm_device *dev,
8412
				  struct drm_display_mode *mode,
8413
				  int depth, int bpp)
8414
{
8415
	struct drm_i915_gem_object *obj;
3243 Serge 8416
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2327 Serge 8417
 
5060 serge 8418
	obj = i915_gem_alloc_object(dev,
8419
				    intel_framebuffer_size_for_mode(mode, bpp));
8420
	if (obj == NULL)
8421
		return ERR_PTR(-ENOMEM);
8422
 
8423
	mode_cmd.width = mode->hdisplay;
8424
	mode_cmd.height = mode->vdisplay;
8425
	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
8426
								bpp);
8427
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
8428
 
8429
	return intel_framebuffer_create(dev, &mode_cmd, obj);
2330 Serge 8430
}
2327 Serge 8431
 
2330 Serge 8432
static struct drm_framebuffer *
8433
mode_fits_in_fbdev(struct drm_device *dev,
8434
		   struct drm_display_mode *mode)
8435
{
4560 Serge 8436
#ifdef CONFIG_DRM_I915_FBDEV
2330 Serge 8437
	struct drm_i915_private *dev_priv = dev->dev_private;
8438
	struct drm_i915_gem_object *obj;
8439
	struct drm_framebuffer *fb;
2327 Serge 8440
 
5060 serge 8441
	if (!dev_priv->fbdev)
4280 Serge 8442
		return NULL;
2327 Serge 8443
 
5060 serge 8444
	if (!dev_priv->fbdev->fb)
2330 Serge 8445
		return NULL;
2327 Serge 8446
 
5060 serge 8447
	obj = dev_priv->fbdev->fb->obj;
8448
	BUG_ON(!obj);
8449
 
8450
	fb = &dev_priv->fbdev->fb->base;
3031 serge 8451
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
8452
							       fb->bits_per_pixel))
4280 Serge 8453
		return NULL;
2327 Serge 8454
 
3031 serge 8455
	if (obj->base.size < mode->vdisplay * fb->pitches[0])
8456
		return NULL;
8457
 
4280 Serge 8458
	return fb;
4560 Serge 8459
#else
8460
	return NULL;
8461
#endif
2330 Serge 8462
}
2327 Serge 8463
 
3031 serge 8464
bool intel_get_load_detect_pipe(struct drm_connector *connector,
2330 Serge 8465
				struct drm_display_mode *mode,
5060 serge 8466
				struct intel_load_detect_pipe *old,
8467
				struct drm_modeset_acquire_ctx *ctx)
2330 Serge 8468
{
8469
	struct intel_crtc *intel_crtc;
3031 serge 8470
	struct intel_encoder *intel_encoder =
8471
		intel_attached_encoder(connector);
2330 Serge 8472
	struct drm_crtc *possible_crtc;
8473
	struct drm_encoder *encoder = &intel_encoder->base;
8474
	struct drm_crtc *crtc = NULL;
8475
	struct drm_device *dev = encoder->dev;
3031 serge 8476
	struct drm_framebuffer *fb;
5060 serge 8477
	struct drm_mode_config *config = &dev->mode_config;
8478
	int ret, i = -1;
2327 Serge 8479
 
2330 Serge 8480
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
5060 serge 8481
		      connector->base.id, connector->name,
8482
		      encoder->base.id, encoder->name);
2327 Serge 8483
 
5060 serge 8484
retry:
8485
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
8486
	if (ret)
8487
		goto fail_unlock;
8488
 
2330 Serge 8489
	/*
8490
	 * Algorithm gets a little messy:
8491
	 *
8492
	 *   - if the connector already has an assigned crtc, use it (but make
8493
	 *     sure it's on first)
8494
	 *
8495
	 *   - try to find the first unused crtc that can drive this connector,
8496
	 *     and use that if we find one
8497
	 */
2327 Serge 8498
 
2330 Serge 8499
	/* See if we already have a CRTC for this connector */
8500
	if (encoder->crtc) {
8501
		crtc = encoder->crtc;
2327 Serge 8502
 
5060 serge 8503
		ret = drm_modeset_lock(&crtc->mutex, ctx);
8504
		if (ret)
8505
			goto fail_unlock;
3480 Serge 8506
 
3031 serge 8507
		old->dpms_mode = connector->dpms;
2330 Serge 8508
		old->load_detect_temp = false;
2327 Serge 8509
 
2330 Serge 8510
		/* Make sure the crtc and connector are running */
3031 serge 8511
		if (connector->dpms != DRM_MODE_DPMS_ON)
8512
			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
2327 Serge 8513
 
2330 Serge 8514
		return true;
8515
	}
2327 Serge 8516
 
2330 Serge 8517
	/* Find an unused one (if possible) */
5060 serge 8518
	for_each_crtc(dev, possible_crtc) {
2330 Serge 8519
		i++;
8520
		if (!(encoder->possible_crtcs & (1 << i)))
8521
			continue;
5060 serge 8522
		if (possible_crtc->enabled)
8523
			continue;
8524
		/* This can occur when applying the pipe A quirk on resume. */
8525
		if (to_intel_crtc(possible_crtc)->new_enabled)
8526
			continue;
8527
 
2330 Serge 8528
		crtc = possible_crtc;
8529
		break;
8530
	}
2327 Serge 8531
 
2330 Serge 8532
	/*
8533
	 * If we didn't find an unused CRTC, don't use any.
8534
	 */
8535
	if (!crtc) {
8536
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
5060 serge 8537
		goto fail_unlock;
2330 Serge 8538
	}
2327 Serge 8539
 
5060 serge 8540
	ret = drm_modeset_lock(&crtc->mutex, ctx);
8541
	if (ret)
8542
		goto fail_unlock;
3031 serge 8543
	intel_encoder->new_crtc = to_intel_crtc(crtc);
8544
	to_intel_connector(connector)->new_encoder = intel_encoder;
2327 Serge 8545
 
2330 Serge 8546
	intel_crtc = to_intel_crtc(crtc);
5060 serge 8547
	intel_crtc->new_enabled = true;
8548
	intel_crtc->new_config = &intel_crtc->config;
3031 serge 8549
	old->dpms_mode = connector->dpms;
2330 Serge 8550
	old->load_detect_temp = true;
8551
	old->release_fb = NULL;
2327 Serge 8552
 
2330 Serge 8553
	if (!mode)
8554
		mode = &load_detect_mode;
2327 Serge 8555
 
2330 Serge 8556
	/* We need a framebuffer large enough to accommodate all accesses
8557
	 * that the plane may generate whilst we perform load detection.
8558
	 * We can not rely on the fbcon either being present (we get called
8559
	 * during its initialisation to detect all boot displays, or it may
8560
	 * not even exist) or that it is large enough to satisfy the
8561
	 * requested mode.
8562
	 */
3031 serge 8563
	fb = mode_fits_in_fbdev(dev, mode);
8564
	if (fb == NULL) {
2330 Serge 8565
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
3031 serge 8566
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
8567
		old->release_fb = fb;
2330 Serge 8568
	} else
8569
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
3031 serge 8570
	if (IS_ERR(fb)) {
2330 Serge 8571
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
5060 serge 8572
		goto fail;
2330 Serge 8573
	}
2327 Serge 8574
 
3480 Serge 8575
	if (intel_set_mode(crtc, mode, 0, 0, fb)) {
2330 Serge 8576
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
8577
		if (old->release_fb)
8578
			old->release_fb->funcs->destroy(old->release_fb);
5060 serge 8579
		goto fail;
2330 Serge 8580
	}
2327 Serge 8581
 
2330 Serge 8582
	/* let the connector get through one full cycle before testing */
8583
	intel_wait_for_vblank(dev, intel_crtc->pipe);
8584
	return true;
5060 serge 8585
 
8586
 fail:
8587
	intel_crtc->new_enabled = crtc->enabled;
8588
	if (intel_crtc->new_enabled)
8589
		intel_crtc->new_config = &intel_crtc->config;
8590
	else
8591
		intel_crtc->new_config = NULL;
8592
fail_unlock:
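	/* -EDEADLK from drm_modeset_lock() means the acquire context has to
	 * back off: drop every lock taken so far and rerun the whole sequence
	 * from the top (the standard w/w-mutex deadlock-avoidance dance). */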
8593
	if (ret == -EDEADLK) {
8594
		drm_modeset_backoff(ctx);
8595
		goto retry;
8596
	}
8597
 
8598
	return false;
2330 Serge 8599
}
2327 Serge 8600
 
3031 serge 8601
void intel_release_load_detect_pipe(struct drm_connector *connector,
2330 Serge 8602
				    struct intel_load_detect_pipe *old)
8603
{
3031 serge 8604
	struct intel_encoder *intel_encoder =
8605
		intel_attached_encoder(connector);
2330 Serge 8606
	struct drm_encoder *encoder = &intel_encoder->base;
3480 Serge 8607
	struct drm_crtc *crtc = encoder->crtc;
5060 serge 8608
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2327 Serge 8609
 
2330 Serge 8610
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
5060 serge 8611
		      connector->base.id, connector->name,
8612
		      encoder->base.id, encoder->name);
2327 Serge 8613
 
2330 Serge 8614
	if (old->load_detect_temp) {
3031 serge 8615
		to_intel_connector(connector)->new_encoder = NULL;
8616
		intel_encoder->new_crtc = NULL;
5060 serge 8617
		intel_crtc->new_enabled = false;
8618
		intel_crtc->new_config = NULL;
3031 serge 8619
		intel_set_mode(crtc, NULL, 0, 0, NULL);
8620
 
3480 Serge 8621
		if (old->release_fb) {
8622
			drm_framebuffer_unregister_private(old->release_fb);
8623
			drm_framebuffer_unreference(old->release_fb);
8624
		}
2327 Serge 8625
 
2330 Serge 8626
		return;
8627
	}
2327 Serge 8628
 
2330 Serge 8629
	/* Switch crtc and encoder back off if necessary */
3031 serge 8630
	if (old->dpms_mode != DRM_MODE_DPMS_ON)
8631
		connector->funcs->dpms(connector, old->dpms_mode);
2330 Serge 8632
}
2327 Serge 8633
 
4560 Serge 8634
static int i9xx_pll_refclk(struct drm_device *dev,
8635
			   const struct intel_crtc_config *pipe_config)
8636
{
8637
	struct drm_i915_private *dev_priv = dev->dev_private;
8638
	u32 dpll = pipe_config->dpll_hw_state.dpll;
8639
 
8640
	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
8641
		return dev_priv->vbt.lvds_ssc_freq;
8642
	else if (HAS_PCH_SPLIT(dev))
8643
		return 120000;
8644
	else if (!IS_GEN2(dev))
8645
		return 96000;
8646
	else
8647
		return 48000;
8648
}
8649
 
2330 Serge 8650
/* Returns the clock of the currently programmed mode of the given pipe. */
4104 Serge 8651
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
8652
				struct intel_crtc_config *pipe_config)
2330 Serge 8653
{
4104 Serge 8654
	struct drm_device *dev = crtc->base.dev;
2330 Serge 8655
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 8656
	int pipe = pipe_config->cpu_transcoder;
4560 Serge 8657
	u32 dpll = pipe_config->dpll_hw_state.dpll;
2330 Serge 8658
	u32 fp;
8659
	intel_clock_t clock;
4560 Serge 8660
	int refclk = i9xx_pll_refclk(dev, pipe_config);
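	/* This is the inverse of the mode-set path: the saved DPLL/FP values
	 * are split back into their m/n/p dividers and handed to the platform
	 * clock helper to recover the dot clock that was programmed. */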
2327 Serge 8661
 
2330 Serge 8662
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
4560 Serge 8663
		fp = pipe_config->dpll_hw_state.fp0;
2330 Serge 8664
	else
4560 Serge 8665
		fp = pipe_config->dpll_hw_state.fp1;
2327 Serge 8666
 
2330 Serge 8667
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
8668
	if (IS_PINEVIEW(dev)) {
8669
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
8670
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
8671
	} else {
8672
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
8673
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
8674
	}
2327 Serge 8675
 
2330 Serge 8676
	if (!IS_GEN2(dev)) {
8677
		if (IS_PINEVIEW(dev))
8678
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
8679
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
8680
		else
8681
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
8682
			       DPLL_FPA01_P1_POST_DIV_SHIFT);
2327 Serge 8683
 
2330 Serge 8684
		switch (dpll & DPLL_MODE_MASK) {
8685
		case DPLLB_MODE_DAC_SERIAL:
8686
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
8687
				5 : 10;
8688
			break;
8689
		case DPLLB_MODE_LVDS:
8690
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
8691
				7 : 14;
8692
			break;
8693
		default:
8694
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
8695
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
4104 Serge 8696
			return;
2330 Serge 8697
		}
2327 Serge 8698
 
4104 Serge 8699
		if (IS_PINEVIEW(dev))
4560 Serge 8700
			pineview_clock(refclk, &clock);
4104 Serge 8701
		else
4560 Serge 8702
			i9xx_clock(refclk, &clock);
2330 Serge 8703
	} else {
4560 Serge 8704
		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
8705
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
2327 Serge 8706
 
2330 Serge 8707
		if (is_lvds) {
8708
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
8709
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
4560 Serge 8710
 
8711
			if (lvds & LVDS_CLKB_POWER_UP)
8712
				clock.p2 = 7;
8713
			else
2330 Serge 8714
				clock.p2 = 14;
8715
		} else {
8716
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
8717
				clock.p1 = 2;
8718
			else {
8719
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
8720
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
8721
			}
8722
			if (dpll & PLL_P2_DIVIDE_BY_4)
8723
				clock.p2 = 4;
8724
			else
8725
				clock.p2 = 2;
4560 Serge 8726
		}
2327 Serge 8727
 
4560 Serge 8728
		i9xx_clock(refclk, &clock);
2330 Serge 8729
	}
2327 Serge 8730
 
4560 Serge 8731
	/*
8732
	 * This value includes pixel_multiplier. We will use
8733
	 * port_clock to compute adjusted_mode.crtc_clock in the
8734
	 * encoder's get_config() function.
8735
	 */
8736
	pipe_config->port_clock = clock.dot;
4104 Serge 8737
}
8738
 
4560 Serge 8739
int intel_dotclock_calculate(int link_freq,
8740
			     const struct intel_link_m_n *m_n)
4104 Serge 8741
{
8742
	/*
8743
	 * The calculation for the data clock is:
4560 Serge 8744
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
4104 Serge 8745
	 * But we want to avoid losing precision if possible, so:
4560 Serge 8746
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
4104 Serge 8747
	 *
8748
	 * and the link clock is simpler:
4560 Serge 8749
	 * link_clock = (m * link_clock) / n
2330 Serge 8750
	 */
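	/* Illustrative numbers only: a link clock of 270000 kHz with a link
	 * M/N ratio of 22/32 yields 270000 * 22 / 32 = 185625 kHz, in the
	 * same units as the link_freq argument. */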
2327 Serge 8751
 
4560 Serge 8752
	if (!m_n->link_n)
8753
		return 0;
4104 Serge 8754
 
4560 Serge 8755
	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
8756
}
4104 Serge 8757
 
4560 Serge 8758
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
8759
				   struct intel_crtc_config *pipe_config)
8760
{
8761
	struct drm_device *dev = crtc->base.dev;
4104 Serge 8762
 
4560 Serge 8763
	/* read out port_clock from the DPLL */
8764
	i9xx_crtc_clock_get(crtc, pipe_config);
4104 Serge 8765
 
4560 Serge 8766
	/*
8767
	 * This value does not include pixel_multiplier.
8768
	 * We will check that port_clock and adjusted_mode.crtc_clock
8769
	 * agree once we know their relationship in the encoder's
8770
	 * get_config() function.
8771
	 */
8772
	pipe_config->adjusted_mode.crtc_clock =
8773
		intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
8774
					 &pipe_config->fdi_m_n);
2330 Serge 8775
}
2327 Serge 8776
 
2330 Serge 8777
/** Returns the currently programmed mode of the given pipe. */
8778
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
8779
					     struct drm_crtc *crtc)
8780
{
8781
	struct drm_i915_private *dev_priv = dev->dev_private;
8782
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3746 Serge 8783
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
2330 Serge 8784
	struct drm_display_mode *mode;
4104 Serge 8785
	struct intel_crtc_config pipe_config;
3243 Serge 8786
	int htot = I915_READ(HTOTAL(cpu_transcoder));
8787
	int hsync = I915_READ(HSYNC(cpu_transcoder));
8788
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
8789
	int vsync = I915_READ(VSYNC(cpu_transcoder));
4560 Serge 8790
	enum pipe pipe = intel_crtc->pipe;
2327 Serge 8791
 
2330 Serge 8792
	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
8793
	if (!mode)
8794
		return NULL;
8795
 
4104 Serge 8796
	/*
8797
	 * Construct a pipe_config sufficient for getting the clock info
8798
	 * back out of crtc_clock_get.
8799
	 *
8800
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
8801
	 * to use a real value here instead.
8802
	 */
4560 Serge 8803
	pipe_config.cpu_transcoder = (enum transcoder) pipe;
4104 Serge 8804
	pipe_config.pixel_multiplier = 1;
4560 Serge 8805
	pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
8806
	pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
8807
	pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
4104 Serge 8808
	i9xx_crtc_clock_get(intel_crtc, &pipe_config);
8809
 
4560 Serge 8810
	mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
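	/* The pipe timing registers hold each value minus one, hence the
	 * "+ 1" on every field while reconstructing the mode below. */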
2330 Serge 8811
	mode->hdisplay = (htot & 0xffff) + 1;
8812
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
8813
	mode->hsync_start = (hsync & 0xffff) + 1;
8814
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
8815
	mode->vdisplay = (vtot & 0xffff) + 1;
8816
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
8817
	mode->vsync_start = (vsync & 0xffff) + 1;
8818
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
8819
 
8820
	drm_mode_set_name(mode);
8821
 
8822
	return mode;
8823
}
8824
 
5060 serge 8825
static void intel_increase_pllclock(struct drm_device *dev,
8826
				    enum pipe pipe)
2327 Serge 8827
{
5060 serge 8828
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 8829
	int dpll_reg = DPLL(pipe);
8830
	int dpll;
8831
 
5060 serge 8832
	if (!HAS_GMCH_DISPLAY(dev))
2327 Serge 8833
		return;
8834
 
8835
	if (!dev_priv->lvds_downclock_avail)
8836
		return;
8837
 
8838
	dpll = I915_READ(dpll_reg);
8839
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
8840
		DRM_DEBUG_DRIVER("upclocking LVDS\n");
8841
 
3031 serge 8842
		assert_panel_unlocked(dev_priv, pipe);
2327 Serge 8843
 
8844
		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
8845
		I915_WRITE(dpll_reg, dpll);
8846
		intel_wait_for_vblank(dev, pipe);
8847
 
8848
		dpll = I915_READ(dpll_reg);
8849
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
8850
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
8851
	}
8852
}
8853
 
3031 serge 8854
static void intel_decrease_pllclock(struct drm_crtc *crtc)
8855
{
8856
	struct drm_device *dev = crtc->dev;
5060 serge 8857
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 8858
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2327 Serge 8859
 
5060 serge 8860
	if (!HAS_GMCH_DISPLAY(dev))
3031 serge 8861
		return;
2327 Serge 8862
 
3031 serge 8863
	if (!dev_priv->lvds_downclock_avail)
8864
		return;
2327 Serge 8865
 
3031 serge 8866
	/*
8867
	 * Since this is called by a timer, we should never get here in
8868
	 * the manual case.
8869
	 */
8870
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
8871
		int pipe = intel_crtc->pipe;
8872
		int dpll_reg = DPLL(pipe);
8873
		int dpll;
2327 Serge 8874
 
3031 serge 8875
		DRM_DEBUG_DRIVER("downclocking LVDS\n");
2327 Serge 8876
 
3031 serge 8877
		assert_panel_unlocked(dev_priv, pipe);
2327 Serge 8878
 
3031 serge 8879
		dpll = I915_READ(dpll_reg);
8880
		dpll |= DISPLAY_RATE_SELECT_FPA1;
8881
		I915_WRITE(dpll_reg, dpll);
8882
		intel_wait_for_vblank(dev, pipe);
8883
		dpll = I915_READ(dpll_reg);
8884
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
8885
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
8886
	}
2327 Serge 8887
 
3031 serge 8888
}
2327 Serge 8889
 
3031 serge 8890
void intel_mark_busy(struct drm_device *dev)
8891
{
4104 Serge 8892
	struct drm_i915_private *dev_priv = dev->dev_private;
8893
 
5060 serge 8894
	if (dev_priv->mm.busy)
8895
		return;
8896
 
8897
	intel_runtime_pm_get(dev_priv);
4104 Serge 8898
	i915_update_gfx_val(dev_priv);
5060 serge 8899
	dev_priv->mm.busy = true;
3031 serge 8900
}
2327 Serge 8901
 
3031 serge 8902
void intel_mark_idle(struct drm_device *dev)
8903
{
4104 Serge 8904
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 8905
	struct drm_crtc *crtc;
2327 Serge 8906
 
5060 serge 8907
	if (!dev_priv->mm.busy)
3031 serge 8908
		return;
2327 Serge 8909
 
5060 serge 8910
	dev_priv->mm.busy = false;
8911
 
8912
	if (!i915.powersave)
8913
		goto out;
8914
 
8915
	for_each_crtc(dev, crtc) {
8916
		if (!crtc->primary->fb)
3031 serge 8917
			continue;
2327 Serge 8918
 
3480 Serge 8919
		intel_decrease_pllclock(crtc);
3031 serge 8920
	}
4560 Serge 8921
 
5060 serge 8922
	if (INTEL_INFO(dev)->gen >= 6)
4560 Serge 8923
		gen6_rps_idle(dev->dev_private);
5060 serge 8924
 
8925
out:
8926
	intel_runtime_pm_put(dev_priv);
3031 serge 8927
}
2327 Serge 8928
 
5060 serge 8929
 
8930
/**
8931
 * intel_mark_fb_busy - mark given planes as busy
8932
 * @dev: DRM device
8933
 * @frontbuffer_bits: bits for the affected planes
8934
 * @ring: optional ring for asynchronous commands
8935
 *
8936
 * This function gets called every time the screen contents change. It can be
8937
 * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
8938
 */
8939
static void intel_mark_fb_busy(struct drm_device *dev,
8940
			       unsigned frontbuffer_bits,
8941
			struct intel_engine_cs *ring)
3031 serge 8942
{
5060 serge 8943
	enum pipe pipe;
2327 Serge 8944
 
5060 serge 8945
	if (!i915.powersave)
3031 serge 8946
		return;
2327 Serge 8947
 
5060 serge 8948
	for_each_pipe(pipe) {
8949
		if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
3031 serge 8950
			continue;
2327 Serge 8951
 
5060 serge 8952
		intel_increase_pllclock(dev, pipe);
4104 Serge 8953
		if (ring && intel_fbc_enabled(dev))
8954
			ring->fbc_dirty = true;
3031 serge 8955
	}
8956
}
2327 Serge 8957
 
5060 serge 8958
/**
8959
 * intel_fb_obj_invalidate - invalidate frontbuffer object
8960
 * @obj: GEM object to invalidate
8961
 * @ring: set for asynchronous rendering
8962
 *
8963
 * This function gets called every time rendering on the given object starts and
8964
 * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
8965
 * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
8966
 * until the rendering completes or a flip on this frontbuffer plane is
8967
 * scheduled.
8968
 */
8969
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
8970
			     struct intel_engine_cs *ring)
8971
{
8972
	struct drm_device *dev = obj->base.dev;
8973
	struct drm_i915_private *dev_priv = dev->dev_private;
8974
 
8975
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
8976
 
8977
	if (!obj->frontbuffer_bits)
8978
		return;
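	/* For ring-based (asynchronous) rendering the affected planes are
	 * recorded in busy_bits so later flushes are deferred until the
	 * rendering completes; any pending flip bits for them are dropped. */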
8979
 
8980
	if (ring) {
8981
		mutex_lock(&dev_priv->fb_tracking.lock);
8982
		dev_priv->fb_tracking.busy_bits
8983
			|= obj->frontbuffer_bits;
8984
		dev_priv->fb_tracking.flip_bits
8985
			&= ~obj->frontbuffer_bits;
8986
		mutex_unlock(&dev_priv->fb_tracking.lock);
8987
	}
8988
 
8989
	intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
8990
 
8991
	intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
8992
}
8993
 
8994
/**
8995
 * intel_frontbuffer_flush - flush frontbuffer
8996
 * @dev: DRM device
8997
 * @frontbuffer_bits: frontbuffer plane tracking bits
8998
 *
8999
 * This function gets called every time rendering on the given planes has
9000
 * completed and frontbuffer caching can be started again. Flushes will get
9001
 * delayed if they're blocked by some outstanding asynchronous rendering.
9002
 *
9003
 * Can be called without any locks held.
9004
 */
9005
void intel_frontbuffer_flush(struct drm_device *dev,
9006
			     unsigned frontbuffer_bits)
9007
{
9008
	struct drm_i915_private *dev_priv = dev->dev_private;
9009
 
9010
	/* Delay flushing when rings are still busy.*/
9011
	mutex_lock(&dev_priv->fb_tracking.lock);
9012
	frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
9013
	mutex_unlock(&dev_priv->fb_tracking.lock);
9014
 
9015
	intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
9016
 
9017
	intel_edp_psr_flush(dev, frontbuffer_bits);
9018
}
2330 Serge 9019
static void intel_crtc_destroy(struct drm_crtc *crtc)
9020
{
9021
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9022
	struct drm_device *dev = crtc->dev;
9023
	struct intel_unpin_work *work;
9024
	unsigned long flags;
2327 Serge 9025
 
2330 Serge 9026
	spin_lock_irqsave(&dev->event_lock, flags);
9027
	work = intel_crtc->unpin_work;
9028
	intel_crtc->unpin_work = NULL;
9029
	spin_unlock_irqrestore(&dev->event_lock, flags);
2327 Serge 9030
 
2330 Serge 9031
	if (work) {
4293 Serge 9032
		cancel_work_sync(&work->work);
2330 Serge 9033
		kfree(work);
9034
	}
2327 Serge 9035
 
2330 Serge 9036
	drm_crtc_cleanup(crtc);
2327 Serge 9037
 
2330 Serge 9038
	kfree(intel_crtc);
9039
}
2327 Serge 9040
 
3031 serge 9041
#if 0
9042
static void intel_unpin_work_fn(struct work_struct *__work)
9043
{
9044
	struct intel_unpin_work *work =
9045
		container_of(__work, struct intel_unpin_work, work);
3243 Serge 9046
	struct drm_device *dev = work->crtc->dev;
5060 serge 9047
	enum pipe pipe = to_intel_crtc(work->crtc)->pipe;
2327 Serge 9048
 
3243 Serge 9049
	mutex_lock(&dev->struct_mutex);
3031 serge 9050
	intel_unpin_fb_obj(work->old_fb_obj);
9051
	drm_gem_object_unreference(&work->pending_flip_obj->base);
9052
	drm_gem_object_unreference(&work->old_fb_obj->base);
2327 Serge 9053
 
3243 Serge 9054
	intel_update_fbc(dev);
9055
	mutex_unlock(&dev->struct_mutex);
9056
 
9057
	BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
9058
	atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
9059
 
3031 serge 9060
	kfree(work);
9061
}
2327 Serge 9062
 
3031 serge 9063
static void do_intel_finish_page_flip(struct drm_device *dev,
9064
				      struct drm_crtc *crtc)
9065
{
5060 serge 9066
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 9067
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9068
	struct intel_unpin_work *work;
9069
	unsigned long flags;
2327 Serge 9070
 
3031 serge 9071
	/* Ignore early vblank irqs */
9072
	if (intel_crtc == NULL)
9073
		return;
2327 Serge 9074
 
3031 serge 9075
	spin_lock_irqsave(&dev->event_lock, flags);
9076
	work = intel_crtc->unpin_work;
3243 Serge 9077
 
9078
	/* Ensure we don't miss a work->pending update ... */
9079
	smp_rmb();
9080
 
9081
	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
3031 serge 9082
		spin_unlock_irqrestore(&dev->event_lock, flags);
9083
		return;
9084
	}
2327 Serge 9085
 
3243 Serge 9086
	/* and that the unpin work is consistent wrt ->pending. */
9087
	smp_rmb();
9088
 
3031 serge 9089
	intel_crtc->unpin_work = NULL;
2327 Serge 9090
 
3243 Serge 9091
	if (work->event)
9092
		drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
2327 Serge 9093
 
5060 serge 9094
	drm_crtc_vblank_put(crtc);
2327 Serge 9095
 
3031 serge 9096
	spin_unlock_irqrestore(&dev->event_lock, flags);
2327 Serge 9097
 
3480 Serge 9098
	wake_up_all(&dev_priv->pending_flip_queue);
2327 Serge 9099
 
3243 Serge 9100
	queue_work(dev_priv->wq, &work->work);
9101
 
3031 serge 9102
	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
9103
}
2327 Serge 9104
 
3031 serge 9105
void intel_finish_page_flip(struct drm_device *dev, int pipe)
9106
{
5060 serge 9107
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 9108
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2327 Serge 9109
 
3031 serge 9110
	do_intel_finish_page_flip(dev, crtc);
9111
}
2327 Serge 9112
 
3031 serge 9113
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
9114
{
5060 serge 9115
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 9116
	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
2327 Serge 9117
 
3031 serge 9118
	do_intel_finish_page_flip(dev, crtc);
9119
}
2327 Serge 9120
 
5060 serge 9121
/* Is 'a' after or equal to 'b'? */
9122
static bool g4x_flip_count_after_eq(u32 a, u32 b)
9123
{
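	/* u32 subtraction wraps modulo 2^32, so this sign-bit test stays
	 * correct across flip-counter wraparound (same idea as time_after()). */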
9124
	return !((a - b) & 0x80000000);
9125
}
9126
 
9127
static bool page_flip_finished(struct intel_crtc *crtc)
9128
{
9129
	struct drm_device *dev = crtc->base.dev;
9130
	struct drm_i915_private *dev_priv = dev->dev_private;
9131
 
9132
	/*
9133
	 * The relevant registers don't exist on pre-ctg.
9134
	 * As the flip done interrupt doesn't trigger for mmio
9135
	 * flips on gmch platforms, a flip count check isn't
9136
	 * really needed there. But since ctg has the registers,
9137
	 * include it in the check anyway.
9138
	 */
9139
	if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
9140
		return true;
9141
 
9142
	/*
9143
	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
9144
	 * used the same base address. In that case the mmio flip might
9145
	 * have completed, but the CS hasn't even executed the flip yet.
9146
	 *
9147
	 * A flip count check isn't enough as the CS might have updated
9148
	 * the base address just after start of vblank, but before we
9149
	 * managed to process the interrupt. This means we'd complete the
9150
	 * CS flip too soon.
9151
	 *
9152
	 * Combining both checks should get us a good enough result. It may
9153
	 * still happen that the CS flip has been executed, but has not
9154
	 * yet actually completed. But in case the base address is the same
9155
	 * anyway, we don't really care.
9156
	 */
9157
	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
9158
		crtc->unpin_work->gtt_offset &&
9159
		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_GM45(crtc->pipe)),
9160
				    crtc->unpin_work->flip_count);
9161
}
9162
 
3031 serge 9163
void intel_prepare_page_flip(struct drm_device *dev, int plane)
9164
{
5060 serge 9165
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 9166
	struct intel_crtc *intel_crtc =
9167
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
9168
	unsigned long flags;
2327 Serge 9169
 
3243 Serge 9170
	/* NB: An MMIO update of the plane base pointer will also
9171
	 * generate a page-flip completion irq, i.e. every modeset
9172
	 * is also accompanied by a spurious intel_prepare_page_flip().
9173
	 */
3031 serge 9174
	spin_lock_irqsave(&dev->event_lock, flags);
5060 serge 9175
	if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
3243 Serge 9176
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
3031 serge 9177
	spin_unlock_irqrestore(&dev->event_lock, flags);
9178
}
2327 Serge 9179
 
5060 serge 9180
static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
3243 Serge 9181
{
9182
	/* Ensure that the work item is consistent when activating it ... */
9183
	smp_wmb();
9184
	atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
9185
	/* and that it is marked active as soon as the irq could fire. */
9186
	smp_wmb();
9187
}
9188
 
3031 serge 9189
static int intel_gen2_queue_flip(struct drm_device *dev,
9190
				 struct drm_crtc *crtc,
9191
				 struct drm_framebuffer *fb,
4104 Serge 9192
				 struct drm_i915_gem_object *obj,
5060 serge 9193
				 struct intel_engine_cs *ring,
4104 Serge 9194
				 uint32_t flags)
3031 serge 9195
{
9196
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9197
	u32 flip_mask;
9198
	int ret;
2327 Serge 9199
 
3031 serge 9200
	ret = intel_ring_begin(ring, 6);
9201
	if (ret)
5060 serge 9202
		return ret;
2327 Serge 9203
 
3031 serge 9204
	/* Can't queue multiple flips, so wait for the previous
9205
	 * one to finish before executing the next.
9206
	 */
9207
	if (intel_crtc->plane)
9208
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
9209
	else
9210
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
9211
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
9212
	intel_ring_emit(ring, MI_NOOP);
9213
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
9214
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9215
	intel_ring_emit(ring, fb->pitches[0]);
5060 serge 9216
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
3031 serge 9217
	intel_ring_emit(ring, 0); /* aux display base address, unused */
3243 Serge 9218
 
9219
	intel_mark_page_flip_active(intel_crtc);
4560 Serge 9220
	__intel_ring_advance(ring);
3031 serge 9221
	return 0;
9222
}
2327 Serge 9223
 
3031 serge 9224
static int intel_gen3_queue_flip(struct drm_device *dev,
9225
				 struct drm_crtc *crtc,
9226
				 struct drm_framebuffer *fb,
4104 Serge 9227
				 struct drm_i915_gem_object *obj,
5060 serge 9228
				 struct intel_engine_cs *ring,
4104 Serge 9229
				 uint32_t flags)
3031 serge 9230
{
9231
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9232
	u32 flip_mask;
9233
	int ret;
2327 Serge 9234
 
3031 serge 9235
	ret = intel_ring_begin(ring, 6);
9236
	if (ret)
5060 serge 9237
		return ret;
2327 Serge 9238
 
3031 serge 9239
	if (intel_crtc->plane)
9240
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
9241
	else
9242
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
9243
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
9244
	intel_ring_emit(ring, MI_NOOP);
9245
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
9246
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9247
	intel_ring_emit(ring, fb->pitches[0]);
5060 serge 9248
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
3031 serge 9249
	intel_ring_emit(ring, MI_NOOP);
2327 Serge 9250
 
3243 Serge 9251
	intel_mark_page_flip_active(intel_crtc);
4560 Serge 9252
	__intel_ring_advance(ring);
3031 serge 9253
	return 0;
9254
}
2327 Serge 9255
 
3031 serge 9256
static int intel_gen4_queue_flip(struct drm_device *dev,
9257
				 struct drm_crtc *crtc,
9258
				 struct drm_framebuffer *fb,
4104 Serge 9259
				 struct drm_i915_gem_object *obj,
5060 serge 9260
				 struct intel_engine_cs *ring,
4104 Serge 9261
				 uint32_t flags)
3031 serge 9262
{
9263
	struct drm_i915_private *dev_priv = dev->dev_private;
9264
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9265
	uint32_t pf, pipesrc;
9266
	int ret;
2327 Serge 9267
 
3031 serge 9268
	ret = intel_ring_begin(ring, 4);
9269
	if (ret)
5060 serge 9270
		return ret;
2327 Serge 9271
 
3031 serge 9272
	/* i965+ uses the linear or tiled offsets from the
9273
	 * Display Registers (which do not change across a page-flip)
9274
	 * so we need only reprogram the base address.
9275
	 */
9276
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
9277
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9278
	intel_ring_emit(ring, fb->pitches[0]);
5060 serge 9279
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
3031 serge 9280
			obj->tiling_mode);
2327 Serge 9281
 
3031 serge 9282
	/* XXX Enabling the panel-fitter across page-flip is so far
9283
	 * untested on non-native modes, so ignore it for now.
9284
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
9285
	 */
9286
	pf = 0;
9287
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
9288
	intel_ring_emit(ring, pf | pipesrc);
3243 Serge 9289
 
9290
	intel_mark_page_flip_active(intel_crtc);
4560 Serge 9291
	__intel_ring_advance(ring);
3031 serge 9292
	return 0;
9293
}
2327 Serge 9294
 
3031 serge 9295
static int intel_gen6_queue_flip(struct drm_device *dev,
9296
				 struct drm_crtc *crtc,
9297
				 struct drm_framebuffer *fb,
4104 Serge 9298
				 struct drm_i915_gem_object *obj,
5060 serge 9299
				 struct intel_engine_cs *ring,
4104 Serge 9300
				 uint32_t flags)
3031 serge 9301
{
9302
	struct drm_i915_private *dev_priv = dev->dev_private;
9303
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9304
	uint32_t pf, pipesrc;
9305
	int ret;
2327 Serge 9306
 
3031 serge 9307
	ret = intel_ring_begin(ring, 4);
9308
	if (ret)
5060 serge 9309
		return ret;
2327 Serge 9310
 
3031 serge 9311
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
9312
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9313
	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
5060 serge 9314
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
2327 Serge 9315
 
3031 serge 9316
	/* Contrary to the suggestions in the documentation,
9317
	 * "Enable Panel Fitter" does not seem to be required when page
9318
	 * flipping with a non-native mode, and worse causes a normal
9319
	 * modeset to fail.
9320
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
9321
	 */
9322
	pf = 0;
9323
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
9324
	intel_ring_emit(ring, pf | pipesrc);
3243 Serge 9325
 
9326
	intel_mark_page_flip_active(intel_crtc);
4560 Serge 9327
	__intel_ring_advance(ring);
3031 serge 9328
	return 0;
9329
}
2327 Serge 9330
 
3031 serge 9331
static int intel_gen7_queue_flip(struct drm_device *dev,
9332
				 struct drm_crtc *crtc,
9333
				 struct drm_framebuffer *fb,
4104 Serge 9334
				 struct drm_i915_gem_object *obj,
5060 serge 9335
				 struct intel_engine_cs *ring,
4104 Serge 9336
				 uint32_t flags)
3031 serge 9337
{
9338
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9339
	uint32_t plane_bit = 0;
4104 Serge 9340
	int len, ret;
2327 Serge 9341
 
5060 serge 9342
	switch (intel_crtc->plane) {
3031 serge 9343
	case PLANE_A:
9344
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
9345
		break;
9346
	case PLANE_B:
9347
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
9348
		break;
9349
	case PLANE_C:
9350
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
9351
		break;
9352
	default:
9353
		WARN_ONCE(1, "unknown plane in flip command\n");
5060 serge 9354
		return -ENODEV;
3031 serge 9355
	}
2327 Serge 9356
 
4104 Serge 9357
	len = 4;
5060 serge 9358
	if (ring->id == RCS) {
4104 Serge 9359
		len += 6;
5060 serge 9360
		/*
9361
		 * On Gen 8, SRM is now taking an extra dword to accommodate
9362
		 * 48bits addresses, and we need a NOOP for the batch size to
9363
		 * stay even.
9364
		 */
9365
		if (IS_GEN8(dev))
9366
			len += 2;
9367
	}
4104 Serge 9368
 
5060 serge 9369
	/*
9370
	 * BSpec MI_DISPLAY_FLIP for IVB:
9371
	 * "The full packet must be contained within the same cache line."
9372
	 *
9373
	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
9374
	 * cacheline, if we ever start emitting more commands before
9375
	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
9376
	 * then do the cacheline alignment, and finally emit the
9377
	 * MI_DISPLAY_FLIP.
9378
	 */
9379
	ret = intel_ring_cacheline_align(ring);
9380
	if (ret)
9381
		return ret;
9382
 
4104 Serge 9383
	ret = intel_ring_begin(ring, len);
3031 serge 9384
	if (ret)
5060 serge 9385
		return ret;
2327 Serge 9386
 
4104 Serge 9387
	/* Unmask the flip-done completion message. Note that the bspec says that
9388
	 * we should do this for both the BCS and RCS, and that we must not unmask
9389
	 * more than one flip event at any time (or ensure that one flip message
9390
	 * can be sent by waiting for flip-done prior to queueing new flips).
9391
	 * Experimentation says that BCS works despite DERRMR masking all
9392
	 * flip-done completion events and that unmasking all planes at once
9393
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
9394
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
9395
	 */
9396
	if (ring->id == RCS) {
9397
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
9398
		intel_ring_emit(ring, DERRMR);
9399
		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
9400
					DERRMR_PIPEB_PRI_FLIP_DONE |
9401
					DERRMR_PIPEC_PRI_FLIP_DONE));
5060 serge 9402
		if (IS_GEN8(dev))
9403
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
9404
					      MI_SRM_LRM_GLOBAL_GTT);
9405
		else
4560 Serge 9406
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
9407
					      MI_SRM_LRM_GLOBAL_GTT);
4104 Serge 9408
		intel_ring_emit(ring, DERRMR);
9409
		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
5060 serge 9410
		if (IS_GEN8(dev)) {
9411
			intel_ring_emit(ring, 0);
9412
			intel_ring_emit(ring, MI_NOOP);
9413
		}
4104 Serge 9414
	}
9415
 
3031 serge 9416
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
9417
	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
5060 serge 9418
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
3031 serge 9419
	intel_ring_emit(ring, (MI_NOOP));
3243 Serge 9420
 
9421
	intel_mark_page_flip_active(intel_crtc);
4560 Serge 9422
	__intel_ring_advance(ring);
3031 serge 9423
	return 0;
9424
}
2327 Serge 9425
 
3031 serge 9426
static int intel_default_queue_flip(struct drm_device *dev,
9427
				    struct drm_crtc *crtc,
9428
				    struct drm_framebuffer *fb,
4104 Serge 9429
				    struct drm_i915_gem_object *obj,
5060 serge 9430
				    struct intel_engine_cs *ring,
4104 Serge 9431
				    uint32_t flags)
3031 serge 9432
{
9433
	return -ENODEV;
9434
}
2327 Serge 9435
 
3031 serge 9436
static int intel_crtc_page_flip(struct drm_crtc *crtc,
9437
				struct drm_framebuffer *fb,
4104 Serge 9438
				struct drm_pending_vblank_event *event,
9439
				uint32_t page_flip_flags)
3031 serge 9440
{
9441
	struct drm_device *dev = crtc->dev;
9442
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 9443
	struct drm_framebuffer *old_fb = crtc->primary->fb;
9444
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
3031 serge 9445
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5060 serge 9446
	enum pipe pipe = intel_crtc->pipe;
3031 serge 9447
	struct intel_unpin_work *work;
5060 serge 9448
	struct intel_engine_cs *ring;
3031 serge 9449
	unsigned long flags;
9450
	int ret;
2327 Serge 9451
 
5060 serge 9452
	/*
9453
	 * drm_mode_page_flip_ioctl() should already catch this, but double
9454
	 * check to be safe.  In the future we may enable pageflipping from
9455
	 * a disabled primary plane.
9456
	 */
9457
	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
9458
		return -EBUSY;
9459
 
3031 serge 9460
	/* Can't change pixel format via MI display flips. */
5060 serge 9461
	if (fb->pixel_format != crtc->primary->fb->pixel_format)
3031 serge 9462
		return -EINVAL;
2327 Serge 9463
 
3031 serge 9464
	/*
9465
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
9466
	 * Note that pitch changes could also affect these registers.
9467
	 */
9468
	if (INTEL_INFO(dev)->gen > 3 &&
5060 serge 9469
	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
9470
	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
3031 serge 9471
		return -EINVAL;
2327 Serge 9472
 
4560 Serge 9473
	work = kzalloc(sizeof(*work), GFP_KERNEL);
3031 serge 9474
	if (work == NULL)
9475
		return -ENOMEM;
2327 Serge 9476
 
3031 serge 9477
	work->event = event;
3243 Serge 9478
	work->crtc = crtc;
5060 serge 9479
	work->old_fb_obj = intel_fb_obj(old_fb);
3031 serge 9480
	INIT_WORK(&work->work, intel_unpin_work_fn);
2327 Serge 9481
 
5060 serge 9482
	ret = drm_crtc_vblank_get(crtc);
3031 serge 9483
	if (ret)
9484
		goto free_work;
2327 Serge 9485
 
3031 serge 9486
	/* We borrow the event spin lock for protecting unpin_work */
9487
	spin_lock_irqsave(&dev->event_lock, flags);
9488
	if (intel_crtc->unpin_work) {
9489
		spin_unlock_irqrestore(&dev->event_lock, flags);
9490
		kfree(work);
5060 serge 9491
		drm_crtc_vblank_put(crtc);
2327 Serge 9492
 
3031 serge 9493
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
9494
		return -EBUSY;
9495
	}
9496
	intel_crtc->unpin_work = work;
9497
	spin_unlock_irqrestore(&dev->event_lock, flags);
2327 Serge 9498
 
3243 Serge 9499
	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
9500
		flush_workqueue(dev_priv->wq);
9501
 
3031 serge 9502
	ret = i915_mutex_lock_interruptible(dev);
9503
	if (ret)
9504
		goto cleanup;
2327 Serge 9505
 
3031 serge 9506
	/* Reference the objects for the scheduled work. */
9507
	drm_gem_object_reference(&work->old_fb_obj->base);
9508
	drm_gem_object_reference(&obj->base);
2327 Serge 9509
 
5060 serge 9510
	crtc->primary->fb = fb;
2327 Serge 9511
 
3031 serge 9512
	work->pending_flip_obj = obj;
2327 Serge 9513
 
3031 serge 9514
	work->enable_stall_check = true;
9515
 
3243 Serge 9516
	atomic_inc(&intel_crtc->unpin_work_count);
3480 Serge 9517
	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
3031 serge 9518
 
5060 serge 9519
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
9520
		work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1;
9521
 
9522
	if (IS_VALLEYVIEW(dev)) {
9523
		ring = &dev_priv->ring[BCS];
9524
		if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
9525
			/* vlv: DISPLAY_FLIP fails to change tiling */
9526
			ring = NULL;
9527
	} else if (IS_IVYBRIDGE(dev)) {
9528
		ring = &dev_priv->ring[BCS];
9529
	} else if (INTEL_INFO(dev)->gen >= 7) {
9530
		ring = obj->ring;
9531
		if (ring == NULL || ring->id != RCS)
9532
			ring = &dev_priv->ring[BCS];
9533
	} else {
9534
		ring = &dev_priv->ring[RCS];
9535
	}
9536
 
9537
	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
3031 serge 9538
	if (ret)
9539
		goto cleanup_pending;
9540
 
5060 serge 9541
	work->gtt_offset =
9542
		i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
9543
 
9544
	if (use_mmio_flip(ring, obj))
9545
		ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
9546
					    page_flip_flags);
9547
	else
9548
		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
9549
				page_flip_flags);
9550
	if (ret)
9551
		goto cleanup_unpin;
9552
 
9553
	i915_gem_track_fb(work->old_fb_obj, obj,
9554
			  INTEL_FRONTBUFFER_PRIMARY(pipe));
9555
 
3031 serge 9556
	intel_disable_fbc(dev);
5060 serge 9557
	intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
3031 serge 9558
	mutex_unlock(&dev->struct_mutex);
9559
 
9560
	trace_i915_flip_request(intel_crtc->plane, obj);
9561
 
9562
	return 0;
9563
 
5060 serge 9564
cleanup_unpin:
9565
	intel_unpin_fb_obj(obj);
3031 serge 9566
cleanup_pending:
3243 Serge 9567
	atomic_dec(&intel_crtc->unpin_work_count);
5060 serge 9568
	crtc->primary->fb = old_fb;
3031 serge 9569
	drm_gem_object_unreference(&work->old_fb_obj->base);
9570
	drm_gem_object_unreference(&obj->base);
9571
	mutex_unlock(&dev->struct_mutex);
9572
 
9573
cleanup:
9574
	spin_lock_irqsave(&dev->event_lock, flags);
9575
	intel_crtc->unpin_work = NULL;
9576
	spin_unlock_irqrestore(&dev->event_lock, flags);
9577
 
5060 serge 9578
	drm_crtc_vblank_put(crtc);
3031 serge 9579
free_work:
9580
	kfree(work);
9581
 
5060 serge 9582
	if (ret == -EIO) {
9583
out_hang:
9584
		intel_crtc_wait_for_pending_flips(crtc);
9585
		ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
9586
		if (ret == 0 && event)
9587
			drm_send_vblank_event(dev, pipe, event);
9588
	}
3031 serge 9589
	return ret;
9590
}
9591
#endif
9592
 
9593
static struct drm_crtc_helper_funcs intel_helper_funcs = {
9594
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
9595
	.load_lut = intel_crtc_load_lut,
9596
};
9597
 
9598
/**
9599
 * intel_modeset_update_staged_output_state
9600
 *
9601
 * Updates the staged output configuration state, e.g. after we've read out the
9602
 * current hw state.
9603
 */
9604
static void intel_modeset_update_staged_output_state(struct drm_device *dev)
9605
{
5060 serge 9606
	struct intel_crtc *crtc;
3031 serge 9607
	struct intel_encoder *encoder;
9608
	struct intel_connector *connector;
9609
 
9610
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9611
			    base.head) {
9612
		connector->new_encoder =
9613
			to_intel_encoder(connector->base.encoder);
9614
	}
9615
 
9616
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9617
			    base.head) {
9618
		encoder->new_crtc =
9619
			to_intel_crtc(encoder->base.crtc);
9620
	}
5060 serge 9621
 
9622
	for_each_intel_crtc(dev, crtc) {
9623
		crtc->new_enabled = crtc->base.enabled;
9624
 
9625
		if (crtc->new_enabled)
9626
			crtc->new_config = &crtc->config;
9627
		else
9628
			crtc->new_config = NULL;
9629
	}
3031 serge 9630
}
9631
 
9632
/**
9633
 * intel_modeset_commit_output_state
9634
 *
9635
 * This function copies the staged display pipe configuration to the real one.
9636
 */
9637
static void intel_modeset_commit_output_state(struct drm_device *dev)
9638
{
5060 serge 9639
	struct intel_crtc *crtc;
3031 serge 9640
	struct intel_encoder *encoder;
9641
	struct intel_connector *connector;
9642
 
9643
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9644
			    base.head) {
9645
		connector->base.encoder = &connector->new_encoder->base;
9646
	}
9647
 
9648
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9649
			    base.head) {
9650
		encoder->base.crtc = &encoder->new_crtc->base;
9651
	}
5060 serge 9652
 
9653
	for_each_intel_crtc(dev, crtc) {
9654
		crtc->base.enabled = crtc->new_enabled;
9655
	}
3031 serge 9656
}
9657
 
4104 Serge 9658
static void
5060 serge 9659
connected_sink_compute_bpp(struct intel_connector *connector,
4104 Serge 9660
			   struct intel_crtc_config *pipe_config)
9661
{
9662
	int bpp = pipe_config->pipe_bpp;
9663
 
9664
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
9665
		connector->base.base.id,
5060 serge 9666
		connector->base.name);
4104 Serge 9667
 
9668
	/* Don't use an invalid EDID bpc value */
9669
	if (connector->base.display_info.bpc &&
9670
	    connector->base.display_info.bpc * 3 < bpp) {
9671
		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
9672
			      bpp, connector->base.display_info.bpc*3);
9673
		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
9674
	}
9675
 
9676
	/* Clamp bpp to 8 on screens without EDID 1.4 */
9677
	if (connector->base.display_info.bpc == 0 && bpp > 24) {
9678
		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
9679
			      bpp);
9680
		pipe_config->pipe_bpp = 24;
9681
	}
9682
}
9683
 
3746 Serge 9684
static int
4104 Serge 9685
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
3746 Serge 9686
		    struct drm_framebuffer *fb,
9687
		    struct intel_crtc_config *pipe_config)
9688
{
4104 Serge 9689
	struct drm_device *dev = crtc->base.dev;
9690
	struct intel_connector *connector;
3746 Serge 9691
	int bpp;
9692
 
9693
	switch (fb->pixel_format) {
9694
	case DRM_FORMAT_C8:
9695
		bpp = 8*3; /* since we go through a colormap */
9696
		break;
9697
	case DRM_FORMAT_XRGB1555:
9698
	case DRM_FORMAT_ARGB1555:
9699
		/* checked in intel_framebuffer_init already */
9700
		if (WARN_ON(INTEL_INFO(dev)->gen > 3))
9701
			return -EINVAL;
9702
	case DRM_FORMAT_RGB565:
9703
		bpp = 6*3; /* min is 18bpp */
9704
		break;
9705
	case DRM_FORMAT_XBGR8888:
9706
	case DRM_FORMAT_ABGR8888:
9707
		/* checked in intel_framebuffer_init already */
9708
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
9709
			return -EINVAL;
9710
	case DRM_FORMAT_XRGB8888:
9711
	case DRM_FORMAT_ARGB8888:
9712
		bpp = 8*3;
9713
		break;
9714
	case DRM_FORMAT_XRGB2101010:
9715
	case DRM_FORMAT_ARGB2101010:
9716
	case DRM_FORMAT_XBGR2101010:
9717
	case DRM_FORMAT_ABGR2101010:
9718
		/* checked in intel_framebuffer_init already */
9719
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
9720
			return -EINVAL;
9721
		bpp = 10*3;
9722
		break;
9723
	/* TODO: gen4+ supports 16 bpc floating point, too. */
9724
	default:
9725
		DRM_DEBUG_KMS("unsupported depth\n");
9726
		return -EINVAL;
9727
	}
9728
 
9729
	pipe_config->pipe_bpp = bpp;
9730
 
9731
	/* Clamp display bpp to EDID value */
9732
	list_for_each_entry(connector, &dev->mode_config.connector_list,
4104 Serge 9733
			    base.head) {
9734
		if (!connector->new_encoder ||
9735
		    connector->new_encoder->new_crtc != crtc)
3746 Serge 9736
			continue;
9737
 
4104 Serge 9738
		connected_sink_compute_bpp(connector, pipe_config);
3746 Serge 9739
	}
9740
 
9741
	return bpp;
9742
}
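
/*
 * Worked example (editorial sketch with illustrative values): an XRGB8888
 * framebuffer starts out as pipe_bpp = 8*3 = 24.  If a connector routed to
 * this crtc reports an EDID bpc of 6, connected_sink_compute_bpp() clamps
 * pipe_bpp to 6*3 = 18 while the returned plane bpp stays 24; that mismatch
 * is what later turns on dithering in intel_modeset_pipe_config().
 */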
9743
 
4560 Serge 9744
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
9745
{
9746
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
9747
			"type: 0x%x flags: 0x%x\n",
9748
		mode->crtc_clock,
9749
		mode->crtc_hdisplay, mode->crtc_hsync_start,
9750
		mode->crtc_hsync_end, mode->crtc_htotal,
9751
		mode->crtc_vdisplay, mode->crtc_vsync_start,
9752
		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
9753
}
9754
 
4104 Serge 9755
static void intel_dump_pipe_config(struct intel_crtc *crtc,
9756
				   struct intel_crtc_config *pipe_config,
9757
				   const char *context)
9758
{
9759
	DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
9760
		      context, pipe_name(crtc->pipe));
9761
 
9762
	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
9763
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
9764
		      pipe_config->pipe_bpp, pipe_config->dither);
9765
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
9766
		      pipe_config->has_pch_encoder,
9767
		      pipe_config->fdi_lanes,
9768
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
9769
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
9770
		      pipe_config->fdi_m_n.tu);
4560 Serge 9771
	DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
9772
		      pipe_config->has_dp_encoder,
9773
		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
9774
		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
9775
		      pipe_config->dp_m_n.tu);
4104 Serge 9776
	DRM_DEBUG_KMS("requested mode:\n");
9777
	drm_mode_debug_printmodeline(&pipe_config->requested_mode);
9778
	DRM_DEBUG_KMS("adjusted mode:\n");
9779
	drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
4560 Serge 9780
	intel_dump_crtc_timings(&pipe_config->adjusted_mode);
9781
	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
9782
	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
9783
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
4104 Serge 9784
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
9785
		      pipe_config->gmch_pfit.control,
9786
		      pipe_config->gmch_pfit.pgm_ratios,
9787
		      pipe_config->gmch_pfit.lvds_border_bits);
9788
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
9789
		      pipe_config->pch_pfit.pos,
9790
		      pipe_config->pch_pfit.size,
9791
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
9792
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
4560 Serge 9793
	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
4104 Serge 9794
}
9795
 
5060 serge 9796
static bool encoders_cloneable(const struct intel_encoder *a,
9797
			       const struct intel_encoder *b)
4104 Serge 9798
{
5060 serge 9799
	/* masks could be asymmetric, so check both ways */
9800
	return a == b || (a->cloneable & (1 << b->type) &&
9801
			  b->cloneable & (1 << a->type));
9802
}
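
/*
 * Editorial note: an encoder is always considered cloneable with itself.
 * For two distinct encoders both masks must agree: if a->cloneable
 * advertises b->type but b->cloneable does not advertise a->type, the pair
 * is rejected, hence the check in both directions.
 */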
9803
 
9804
static bool check_single_encoder_cloning(struct intel_crtc *crtc,
9805
					 struct intel_encoder *encoder)
9806
{
9807
	struct drm_device *dev = crtc->base.dev;
9808
	struct intel_encoder *source_encoder;
9809
 
9810
	list_for_each_entry(source_encoder,
9811
			    &dev->mode_config.encoder_list, base.head) {
9812
		if (source_encoder->new_crtc != crtc)
9813
			continue;
9814
 
9815
		if (!encoders_cloneable(encoder, source_encoder))
9816
			return false;
9817
	}
9818
 
9819
	return true;
9820
}
9821
 
9822
static bool check_encoder_cloning(struct intel_crtc *crtc)
9823
{
9824
	struct drm_device *dev = crtc->base.dev;
4104 Serge 9825
	struct intel_encoder *encoder;
9826
 
5060 serge 9827
	list_for_each_entry(encoder,
9828
			    &dev->mode_config.encoder_list, base.head) {
9829
		if (encoder->new_crtc != crtc)
4104 Serge 9830
			continue;
9831
 
5060 serge 9832
		if (!check_single_encoder_cloning(crtc, encoder))
9833
			return false;
4104 Serge 9834
	}
9835
 
5060 serge 9836
	return true;
4104 Serge 9837
}
9838
 
3746 Serge 9839
static struct intel_crtc_config *
9840
intel_modeset_pipe_config(struct drm_crtc *crtc,
9841
			  struct drm_framebuffer *fb,
3031 serge 9842
			    struct drm_display_mode *mode)
9843
{
9844
	struct drm_device *dev = crtc->dev;
9845
	struct intel_encoder *encoder;
3746 Serge 9846
	struct intel_crtc_config *pipe_config;
4104 Serge 9847
	int plane_bpp, ret = -EINVAL;
9848
	bool retry = true;
3031 serge 9849
 
5060 serge 9850
	if (!check_encoder_cloning(to_intel_crtc(crtc))) {
4104 Serge 9851
		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
9852
		return ERR_PTR(-EINVAL);
9853
	}
9854
 
3746 Serge 9855
	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
9856
	if (!pipe_config)
3031 serge 9857
		return ERR_PTR(-ENOMEM);
9858
 
3746 Serge 9859
	drm_mode_copy(&pipe_config->adjusted_mode, mode);
9860
	drm_mode_copy(&pipe_config->requested_mode, mode);
4560 Serge 9861
 
4104 Serge 9862
	pipe_config->cpu_transcoder =
9863
		(enum transcoder) to_intel_crtc(crtc)->pipe;
9864
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
3746 Serge 9865
 
4104 Serge 9866
	/*
9867
	 * Sanitize sync polarity flags based on requested ones. If neither
9868
	 * positive or negative polarity is requested, treat this as meaning
9869
	 * negative polarity.
9870
	 */
9871
	if (!(pipe_config->adjusted_mode.flags &
9872
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
9873
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
9874
 
9875
	if (!(pipe_config->adjusted_mode.flags &
9876
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
9877
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
9878
 
9879
	/* Compute a starting value for pipe_config->pipe_bpp taking the source
9880
	 * plane pixel format and any sink constraints into account. Returns the
9881
	 * source plane bpp so that dithering can be selected on mismatches
9882
	 * after encoders and crtc also have had their say. */
9883
	plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
9884
					      fb, pipe_config);
3746 Serge 9885
	if (plane_bpp < 0)
9886
		goto fail;
9887
 
4560 Serge 9888
	/*
9889
	 * Determine the real pipe dimensions. Note that stereo modes can
9890
	 * increase the actual pipe size due to the frame doubling and
9891
	 * insertion of additional space for blanks between the frame. This
9892
	 * is stored in the crtc timings. We use the requested mode to do this
9893
	 * computation to clearly distinguish it from the adjusted mode, which
9894
	 * can be changed by the connectors in the below retry loop.
9895
	 */
9896
	drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
9897
	pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
9898
	pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;
9899
 
4104 Serge 9900
encoder_retry:
9901
	/* Ensure the port clock defaults are reset when retrying. */
9902
	pipe_config->port_clock = 0;
9903
	pipe_config->pixel_multiplier = 1;
9904
 
9905
	/* Fill in default crtc timings, allow encoders to overwrite them. */
4560 Serge 9906
	drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);
4104 Serge 9907
 
3031 serge 9908
	/* Pass our mode to the connectors and the CRTC to give them a chance to
9909
	 * adjust it according to limitations or connector properties, and also
9910
	 * a chance to reject the mode entirely.
2330 Serge 9911
	 */
3031 serge 9912
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9913
			    base.head) {
2327 Serge 9914
 
3031 serge 9915
		if (&encoder->new_crtc->base != crtc)
9916
			continue;
3746 Serge 9917
 
9918
		if (!(encoder->compute_config(encoder, pipe_config))) {
9919
			DRM_DEBUG_KMS("Encoder config failure\n");
9920
			goto fail;
9921
		}
9922
	}
9923
 
4104 Serge 9924
	/* Set default port clock if not overwritten by the encoder. Needs to be
9925
	 * done afterwards in case the encoder adjusts the mode. */
9926
	if (!pipe_config->port_clock)
4560 Serge 9927
		pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
9928
			* pipe_config->pixel_multiplier;
2327 Serge 9929
 
4104 Serge 9930
	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
9931
	if (ret < 0) {
3031 serge 9932
		DRM_DEBUG_KMS("CRTC fixup failed\n");
9933
		goto fail;
9934
	}
2327 Serge 9935
 
4104 Serge 9936
	if (ret == RETRY) {
9937
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
9938
			ret = -EINVAL;
9939
			goto fail;
9940
		}
9941
 
9942
		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
9943
		retry = false;
9944
		goto encoder_retry;
9945
	}
9946
 
3746 Serge 9947
	pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
9948
	DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
9949
		      plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);
9950
 
9951
	return pipe_config;
3031 serge 9952
fail:
3746 Serge 9953
	kfree(pipe_config);
4104 Serge 9954
	return ERR_PTR(ret);
3031 serge 9955
}
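
/*
 * Editorial note: the RETRY handling above gives the encoders exactly one
 * second pass.  On PCH platforms, for instance, the FDI bandwidth check in
 * the crtc fixup can lower pipe_bpp and request such a retry; a second RETRY
 * is treated as a loop and fails the modeset with -EINVAL.
 */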
2327 Serge 9956
 
3031 serge 9957
/* Computes which crtcs are affected and sets the relevant bits in the mask. For
9958
 * simplicity we use the crtc's pipe number (because it's easier to obtain). */
9959
static void
9960
intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
9961
			     unsigned *prepare_pipes, unsigned *disable_pipes)
9962
{
9963
	struct intel_crtc *intel_crtc;
9964
	struct drm_device *dev = crtc->dev;
9965
	struct intel_encoder *encoder;
9966
	struct intel_connector *connector;
9967
	struct drm_crtc *tmp_crtc;
9968
 
9969
	*disable_pipes = *modeset_pipes = *prepare_pipes = 0;
9970
 
9971
	/* Check which crtcs have changed outputs connected to them, these need
9972
	 * to be part of the prepare_pipes mask. We don't (yet) support global
9973
	 * modeset across multiple crtcs, so modeset_pipes will only have one
9974
	 * bit set at most. */
9975
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9976
			    base.head) {
9977
		if (connector->base.encoder == &connector->new_encoder->base)
9978
			continue;
9979
 
9980
		if (connector->base.encoder) {
9981
			tmp_crtc = connector->base.encoder->crtc;
9982
 
9983
			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
9984
		}
9985
 
9986
		if (connector->new_encoder)
9987
			*prepare_pipes |=
9988
				1 << connector->new_encoder->new_crtc->pipe;
9989
	}
9990
 
9991
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9992
			    base.head) {
9993
		if (encoder->base.crtc == &encoder->new_crtc->base)
9994
			continue;
9995
 
9996
		if (encoder->base.crtc) {
9997
			tmp_crtc = encoder->base.crtc;
9998
 
9999
			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
10000
		}
10001
 
10002
		if (encoder->new_crtc)
10003
			*prepare_pipes |= 1 << encoder->new_crtc->pipe;
10004
	}
10005
 
5060 serge 10006
	/* Check for pipes that will be enabled/disabled ... */
10007
	for_each_intel_crtc(dev, intel_crtc) {
10008
		if (intel_crtc->base.enabled == intel_crtc->new_enabled)
3031 serge 10009
			continue;
10010
 
5060 serge 10011
		if (!intel_crtc->new_enabled)
3031 serge 10012
			*disable_pipes |= 1 << intel_crtc->pipe;
5060 serge 10013
		else
10014
			*prepare_pipes |= 1 << intel_crtc->pipe;
3031 serge 10015
	}
10016
 
10017
 
10018
	/* set_mode is also used to update properties on live display pipes. */
10019
	intel_crtc = to_intel_crtc(crtc);
5060 serge 10020
	if (intel_crtc->new_enabled)
3031 serge 10021
		*prepare_pipes |= 1 << intel_crtc->pipe;
10022
 
3746 Serge 10023
	/*
10024
	 * For simplicity do a full modeset on any pipe where the output routing
10025
	 * changed. We could be more clever, but that would require us to be
10026
	 * more careful with calling the relevant encoder->mode_set functions.
10027
	 */
3031 serge 10028
	if (*prepare_pipes)
10029
		*modeset_pipes = *prepare_pipes;
10030
 
10031
	/* ... and mask these out. */
10032
	*modeset_pipes &= ~(*disable_pipes);
10033
	*prepare_pipes &= ~(*disable_pipes);
3746 Serge 10034
 
10035
	/*
10036
	 * HACK: We don't (yet) fully support global modesets. intel_set_config
10037
	 * obeys this rule, but the modeset restore mode of
10038
	 * intel_modeset_setup_hw_state does not.
10039
	 */
10040
	*modeset_pipes &= 1 << intel_crtc->pipe;
10041
	*prepare_pipes &= 1 << intel_crtc->pipe;
4104 Serge 10042
 
10043
	DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
10044
		      *modeset_pipes, *prepare_pipes, *disable_pipes);
2330 Serge 10045
}
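
/*
 * Worked example (editorial sketch): moving the only active output from the
 * crtc on pipe A to the crtc on pipe B (the crtc passed in) makes the routing
 * checks above collect prepare_pipes = (1 << PIPE_A) | (1 << PIPE_B), mark
 * disable_pipes = 1 << PIPE_A because pipe A loses its last output, and copy
 * prepare_pipes into modeset_pipes.  After masking out disable_pipes and
 * applying the single-crtc HACK at the end, the caller sees
 * disable_pipes = 0x1 and modeset_pipes = prepare_pipes = 0x2.
 */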
2327 Serge 10046
 
3031 serge 10047
static bool intel_crtc_in_use(struct drm_crtc *crtc)
2330 Serge 10048
{
3031 serge 10049
	struct drm_encoder *encoder;
2330 Serge 10050
	struct drm_device *dev = crtc->dev;
2327 Serge 10051
 
3031 serge 10052
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
10053
		if (encoder->crtc == crtc)
10054
			return true;
10055
 
10056
	return false;
10057
}
10058
 
10059
static void
10060
intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
10061
{
10062
	struct intel_encoder *intel_encoder;
10063
	struct intel_crtc *intel_crtc;
10064
	struct drm_connector *connector;
10065
 
10066
	list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
10067
			    base.head) {
10068
		if (!intel_encoder->base.crtc)
10069
			continue;
10070
 
10071
		intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
10072
 
10073
		if (prepare_pipes & (1 << intel_crtc->pipe))
10074
			intel_encoder->connectors_active = false;
10075
	}
10076
 
10077
	intel_modeset_commit_output_state(dev);
10078
 
5060 serge 10079
	/* Double check state. */
10080
	for_each_intel_crtc(dev, intel_crtc) {
10081
		WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
10082
		WARN_ON(intel_crtc->new_config &&
10083
			intel_crtc->new_config != &intel_crtc->config);
10084
		WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config);
3031 serge 10085
	}
10086
 
10087
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
10088
		if (!connector->encoder || !connector->encoder->crtc)
10089
			continue;
10090
 
10091
		intel_crtc = to_intel_crtc(connector->encoder->crtc);
10092
 
10093
		if (prepare_pipes & (1 << intel_crtc->pipe)) {
10094
			struct drm_property *dpms_property =
10095
				dev->mode_config.dpms_property;
10096
 
10097
			connector->dpms = DRM_MODE_DPMS_ON;
3243 Serge 10098
			drm_object_property_set_value(&connector->base,
3031 serge 10099
							 dpms_property,
10100
							 DRM_MODE_DPMS_ON);
10101
 
10102
			intel_encoder = to_intel_encoder(connector->encoder);
10103
			intel_encoder->connectors_active = true;
10104
		}
10105
	}
10106
 
10107
}
10108
 
4560 Serge 10109
static bool intel_fuzzy_clock_check(int clock1, int clock2)
4104 Serge 10110
{
4560 Serge 10111
	int diff;
4104 Serge 10112
 
10113
	if (clock1 == clock2)
10114
		return true;
10115
 
10116
	if (!clock1 || !clock2)
10117
		return false;
10118
 
10119
	diff = abs(clock1 - clock2);
10120
 
10121
	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
10122
		return true;
10123
 
10124
	return false;
10125
}
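
/*
 * Worked example (editorial sketch with illustrative values): with
 * clock1 = 100000 and clock2 = 104000 the test computes
 * ((4000 + 204000) * 100) / 204000 = 101 < 105, so the clocks are treated as
 * equal; at clock2 = 111000 the expression evaluates to 105 and the check
 * fails.  The tolerance is a difference of up to 5% of the sum of the two
 * clocks, i.e. roughly 10% of either clock.
 */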
10126
 
3031 serge 10127
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
10128
	list_for_each_entry((intel_crtc), \
10129
			    &(dev)->mode_config.crtc_list, \
10130
			    base.head) \
4104 Serge 10131
		if (mask & (1 <<(intel_crtc)->pipe))
3031 serge 10132
 
3746 Serge 10133
static bool
4104 Serge 10134
intel_pipe_config_compare(struct drm_device *dev,
10135
			  struct intel_crtc_config *current_config,
3746 Serge 10136
			  struct intel_crtc_config *pipe_config)
10137
{
4104 Serge 10138
#define PIPE_CONF_CHECK_X(name)	\
10139
	if (current_config->name != pipe_config->name) { \
10140
		DRM_ERROR("mismatch in " #name " " \
10141
			  "(expected 0x%08x, found 0x%08x)\n", \
10142
			  current_config->name, \
10143
			  pipe_config->name); \
10144
		return false; \
3746 Serge 10145
	}
10146
 
4104 Serge 10147
#define PIPE_CONF_CHECK_I(name)	\
10148
	if (current_config->name != pipe_config->name) { \
10149
		DRM_ERROR("mismatch in " #name " " \
10150
			  "(expected %i, found %i)\n", \
10151
			  current_config->name, \
10152
			  pipe_config->name); \
10153
		return false; \
10154
	}
10155
 
10156
#define PIPE_CONF_CHECK_FLAGS(name, mask)	\
10157
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
10158
		DRM_ERROR("mismatch in " #name "(" #mask ") "	   \
10159
			  "(expected %i, found %i)\n", \
10160
			  current_config->name & (mask), \
10161
			  pipe_config->name & (mask)); \
10162
		return false; \
10163
	}
10164
 
4560 Serge 10165
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
10166
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
10167
		DRM_ERROR("mismatch in " #name " " \
10168
			  "(expected %i, found %i)\n", \
10169
			  current_config->name, \
10170
			  pipe_config->name); \
10171
		return false; \
10172
	}
10173
 
4104 Serge 10174
#define PIPE_CONF_QUIRK(quirk)	\
10175
	((current_config->quirks | pipe_config->quirks) & (quirk))
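	/*
	 * Editorial note: each check macro expands in place, e.g.
	 * PIPE_CONF_CHECK_I(pipe_bpp) becomes roughly
	 *
	 *	if (current_config->pipe_bpp != pipe_config->pipe_bpp) {
	 *		DRM_ERROR("mismatch in pipe_bpp (expected %i, found %i)\n",
	 *			  current_config->pipe_bpp, pipe_config->pipe_bpp);
	 *		return false;
	 *	}
	 *
	 * so the first differing field logs an error and aborts the compare.
	 */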
10176
 
10177
	PIPE_CONF_CHECK_I(cpu_transcoder);
10178
 
10179
	PIPE_CONF_CHECK_I(has_pch_encoder);
10180
	PIPE_CONF_CHECK_I(fdi_lanes);
10181
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
10182
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
10183
	PIPE_CONF_CHECK_I(fdi_m_n.link_m);
10184
	PIPE_CONF_CHECK_I(fdi_m_n.link_n);
10185
	PIPE_CONF_CHECK_I(fdi_m_n.tu);
10186
 
4560 Serge 10187
	PIPE_CONF_CHECK_I(has_dp_encoder);
10188
	PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
10189
	PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
10190
	PIPE_CONF_CHECK_I(dp_m_n.link_m);
10191
	PIPE_CONF_CHECK_I(dp_m_n.link_n);
10192
	PIPE_CONF_CHECK_I(dp_m_n.tu);
10193
 
4104 Serge 10194
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
10195
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
10196
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
10197
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_end);
10198
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_start);
10199
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_end);
10200
 
10201
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vdisplay);
10202
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vtotal);
10203
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_start);
10204
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_end);
10205
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
10206
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
10207
 
10208
	PIPE_CONF_CHECK_I(pixel_multiplier);
5060 serge 10209
	PIPE_CONF_CHECK_I(has_hdmi_sink);
10210
	if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
10211
	    IS_VALLEYVIEW(dev))
10212
		PIPE_CONF_CHECK_I(limited_color_range);
4104 Serge 10213
 
5060 serge 10214
	PIPE_CONF_CHECK_I(has_audio);
10215
 
4104 Serge 10216
	PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10217
			      DRM_MODE_FLAG_INTERLACE);
10218
 
10219
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
10220
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10221
				      DRM_MODE_FLAG_PHSYNC);
10222
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10223
				      DRM_MODE_FLAG_NHSYNC);
10224
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10225
				      DRM_MODE_FLAG_PVSYNC);
10226
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10227
				      DRM_MODE_FLAG_NVSYNC);
10228
	}
10229
 
4560 Serge 10230
	PIPE_CONF_CHECK_I(pipe_src_w);
10231
	PIPE_CONF_CHECK_I(pipe_src_h);
4104 Serge 10232
 
5060 serge 10233
	/*
10234
	 * FIXME: BIOS likes to set up a cloned config with lvds+external
10235
	 * screen. Since we don't yet re-compute the pipe config when moving
10236
	 * just the lvds port away to another pipe the sw tracking won't match.
10237
	 *
10238
	 * Proper atomic modesets with recomputed global state will fix this.
10239
	 * Until then just don't check gmch state for inherited modes.
10240
	 */
10241
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
4104 Serge 10242
		PIPE_CONF_CHECK_I(gmch_pfit.control);
10243
		/* pfit ratios are autocomputed by the hw on gen4+ */
10244
		if (INTEL_INFO(dev)->gen < 4)
10245
			PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
10246
		PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
5060 serge 10247
	}
10248
 
4104 Serge 10249
	PIPE_CONF_CHECK_I(pch_pfit.enabled);
10250
	if (current_config->pch_pfit.enabled) {
10251
		PIPE_CONF_CHECK_I(pch_pfit.pos);
10252
		PIPE_CONF_CHECK_I(pch_pfit.size);
10253
	}
10254
 
4560 Serge 10255
	/* BDW+ don't expose a synchronous way to read the state */
10256
	if (IS_HASWELL(dev))
4104 Serge 10257
		PIPE_CONF_CHECK_I(ips_enabled);
10258
 
4560 Serge 10259
	PIPE_CONF_CHECK_I(double_wide);
10260
 
5060 serge 10261
	PIPE_CONF_CHECK_X(ddi_pll_sel);
10262
 
4104 Serge 10263
	PIPE_CONF_CHECK_I(shared_dpll);
10264
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
10265
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
10266
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
10267
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
5060 serge 10268
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
4104 Serge 10269
 
4280 Serge 10270
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
10271
		PIPE_CONF_CHECK_I(pipe_bpp);
10272
 
4560 Serge 10273
	PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
10274
	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
10275
 
4104 Serge 10276
#undef PIPE_CONF_CHECK_X
10277
#undef PIPE_CONF_CHECK_I
10278
#undef PIPE_CONF_CHECK_FLAGS
4560 Serge 10279
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
4104 Serge 10280
#undef PIPE_CONF_QUIRK
10281
 
3746 Serge 10282
	return true;
10283
}
10284
 
4104 Serge 10285
static void
10286
check_connector_state(struct drm_device *dev)
3031 serge 10287
{
10288
	struct intel_connector *connector;
10289
 
10290
	list_for_each_entry(connector, &dev->mode_config.connector_list,
10291
			    base.head) {
10292
		/* This also checks the encoder/connector hw state with the
10293
		 * ->get_hw_state callbacks. */
10294
		intel_connector_check_state(connector);
10295
 
10296
		WARN(&connector->new_encoder->base != connector->base.encoder,
10297
		     "connector's staged encoder doesn't match current encoder\n");
10298
	}
4104 Serge 10299
}
3031 serge 10300
 
4104 Serge 10301
static void
10302
check_encoder_state(struct drm_device *dev)
10303
{
10304
	struct intel_encoder *encoder;
10305
	struct intel_connector *connector;
10306
 
3031 serge 10307
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
10308
			    base.head) {
10309
		bool enabled = false;
10310
		bool active = false;
10311
		enum pipe pipe, tracked_pipe;
10312
 
10313
		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
10314
			      encoder->base.base.id,
5060 serge 10315
			      encoder->base.name);
3031 serge 10316
 
10317
		WARN(&encoder->new_crtc->base != encoder->base.crtc,
10318
		     "encoder's stage crtc doesn't match current crtc\n");
10319
		WARN(encoder->connectors_active && !encoder->base.crtc,
10320
		     "encoder's active_connectors set, but no crtc\n");
10321
 
10322
		list_for_each_entry(connector, &dev->mode_config.connector_list,
10323
				    base.head) {
10324
			if (connector->base.encoder != &encoder->base)
10325
				continue;
10326
			enabled = true;
10327
			if (connector->base.dpms != DRM_MODE_DPMS_OFF)
10328
				active = true;
10329
		}
5060 serge 10330
		/*
10331
		 * for MST connectors if we unplug the connector is gone
10332
		 * away but the encoder is still connected to a crtc
10333
		 * until a modeset happens in response to the hotplug.
10334
		 */
10335
		if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST)
10336
			continue;
10337
 
3031 serge 10338
		WARN(!!encoder->base.crtc != enabled,
10339
		     "encoder's enabled state mismatch "
10340
		     "(expected %i, found %i)\n",
10341
		     !!encoder->base.crtc, enabled);
10342
		WARN(active && !encoder->base.crtc,
10343
		     "active encoder with no crtc\n");
10344
 
10345
		WARN(encoder->connectors_active != active,
10346
		     "encoder's computed active state doesn't match tracked active state "
10347
		     "(expected %i, found %i)\n", active, encoder->connectors_active);
10348
 
10349
		active = encoder->get_hw_state(encoder, &pipe);
10350
		WARN(active != encoder->connectors_active,
10351
		     "encoder's hw state doesn't match sw tracking "
10352
		     "(expected %i, found %i)\n",
10353
		     encoder->connectors_active, active);
10354
 
10355
		if (!encoder->base.crtc)
10356
			continue;
10357
 
10358
		tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
10359
		WARN(active && pipe != tracked_pipe,
10360
		     "active encoder's pipe doesn't match"
10361
		     "(expected %i, found %i)\n",
10362
		     tracked_pipe, pipe);
10363
 
10364
	}
4104 Serge 10365
}
3031 serge 10366
 
4104 Serge 10367
static void
10368
check_crtc_state(struct drm_device *dev)
10369
{
5060 serge 10370
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 10371
	struct intel_crtc *crtc;
10372
	struct intel_encoder *encoder;
10373
	struct intel_crtc_config pipe_config;
10374
 
5060 serge 10375
	for_each_intel_crtc(dev, crtc) {
3031 serge 10376
		bool enabled = false;
10377
		bool active = false;
10378
 
4104 Serge 10379
		memset(&pipe_config, 0, sizeof(pipe_config));
10380
 
3031 serge 10381
		DRM_DEBUG_KMS("[CRTC:%d]\n",
10382
			      crtc->base.base.id);
10383
 
10384
		WARN(crtc->active && !crtc->base.enabled,
10385
		     "active crtc, but not enabled in sw tracking\n");
10386
 
10387
		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
10388
				    base.head) {
10389
			if (encoder->base.crtc != &crtc->base)
10390
				continue;
10391
			enabled = true;
10392
			if (encoder->connectors_active)
10393
				active = true;
10394
		}
4104 Serge 10395
 
3031 serge 10396
		WARN(active != crtc->active,
10397
		     "crtc's computed active state doesn't match tracked active state "
10398
		     "(expected %i, found %i)\n", active, crtc->active);
10399
		WARN(enabled != crtc->base.enabled,
10400
		     "crtc's computed enabled state doesn't match tracked enabled state "
10401
		     "(expected %i, found %i)\n", enabled, crtc->base.enabled);
10402
 
3746 Serge 10403
		active = dev_priv->display.get_pipe_config(crtc,
10404
							   &pipe_config);
10405
 
10406
		/* hw state is inconsistent with the pipe A quirk */
10407
		if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
10408
			active = crtc->active;
10409
 
4104 Serge 10410
		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
10411
				    base.head) {
10412
			enum pipe pipe;
10413
			if (encoder->base.crtc != &crtc->base)
10414
				continue;
4560 Serge 10415
			if (encoder->get_hw_state(encoder, &pipe))
4104 Serge 10416
				encoder->get_config(encoder, &pipe_config);
10417
		}
10418
 
3746 Serge 10419
		WARN(crtc->active != active,
10420
		     "crtc active state doesn't match with hw state "
10421
		     "(expected %i, found %i)\n", crtc->active, active);
10422
 
4104 Serge 10423
		if (active &&
10424
		    !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) {
10425
			WARN(1, "pipe state doesn't match!\n");
10426
			intel_dump_pipe_config(crtc, &pipe_config,
10427
					       "[hw state]");
10428
			intel_dump_pipe_config(crtc, &crtc->config,
10429
					       "[sw state]");
10430
		}
3031 serge 10431
	}
10432
}
10433
 
4104 Serge 10434
static void
10435
check_shared_dpll_state(struct drm_device *dev)
10436
{
5060 serge 10437
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 10438
	struct intel_crtc *crtc;
10439
	struct intel_dpll_hw_state dpll_hw_state;
10440
	int i;
10441
 
10442
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
10443
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
10444
		int enabled_crtcs = 0, active_crtcs = 0;
10445
		bool active;
10446
 
10447
		memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
10448
 
10449
		DRM_DEBUG_KMS("%s\n", pll->name);
10450
 
10451
		active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
10452
 
10453
		WARN(pll->active > pll->refcount,
10454
		     "more active pll users than references: %i vs %i\n",
10455
		     pll->active, pll->refcount);
10456
		WARN(pll->active && !pll->on,
10457
		     "pll in active use but not on in sw tracking\n");
10458
		WARN(pll->on && !pll->active,
10459
		     "pll in on but not on in use in sw tracking\n");
10460
		WARN(pll->on != active,
10461
		     "pll on state mismatch (expected %i, found %i)\n",
10462
		     pll->on, active);
10463
 
5060 serge 10464
		for_each_intel_crtc(dev, crtc) {
4104 Serge 10465
			if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
10466
				enabled_crtcs++;
10467
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
10468
				active_crtcs++;
10469
		}
10470
		WARN(pll->active != active_crtcs,
10471
		     "pll active crtcs mismatch (expected %i, found %i)\n",
10472
		     pll->active, active_crtcs);
10473
		WARN(pll->refcount != enabled_crtcs,
10474
		     "pll enabled crtcs mismatch (expected %i, found %i)\n",
10475
		     pll->refcount, enabled_crtcs);
10476
 
10477
		WARN(pll->on && memcmp(&pll->hw_state, &dpll_hw_state,
10478
				       sizeof(dpll_hw_state)),
10479
		     "pll hw state mismatch\n");
10480
	}
10481
}
10482
 
10483
void
10484
intel_modeset_check_state(struct drm_device *dev)
10485
{
10486
	check_connector_state(dev);
10487
	check_encoder_state(dev);
10488
	check_crtc_state(dev);
10489
	check_shared_dpll_state(dev);
10490
}
10491
 
4560 Serge 10492
void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
10493
				     int dotclock)
10494
{
10495
	/*
10496
	 * FDI already provided one idea for the dotclock.
10497
	 * Yell if the encoder disagrees.
10498
	 */
10499
	WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock),
10500
	     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
10501
	     pipe_config->adjusted_mode.crtc_clock, dotclock);
10502
}
10503
 
5060 serge 10504
static void update_scanline_offset(struct intel_crtc *crtc)
10505
{
10506
	struct drm_device *dev = crtc->base.dev;
10507
 
10508
	/*
10509
	 * The scanline counter increments at the leading edge of hsync.
10510
	 *
10511
	 * On most platforms it starts counting from vtotal-1 on the
10512
	 * first active line. That means the scanline counter value is
10513
	 * always one less than what we would expect. Ie. just after
10514
	 * start of vblank, which also occurs at start of hsync (on the
10515
	 * last active line), the scanline counter will read vblank_start-1.
10516
	 *
10517
	 * On gen2 the scanline counter starts counting from 1 instead
10518
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
10519
	 * to keep the value positive), instead of adding one.
10520
	 *
10521
	 * On HSW+ the behaviour of the scanline counter depends on the output
10522
	 * type. For DP ports it behaves like most other platforms, but on HDMI
10523
	 * there's an extra 1 line difference. So we need to add two instead of
10524
	 * one to the value.
10525
	 */
10526
	if (IS_GEN2(dev)) {
10527
		const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
10528
		int vtotal;
10529
 
10530
		vtotal = mode->crtc_vtotal;
10531
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
10532
			vtotal /= 2;
10533
 
10534
		crtc->scanline_offset = vtotal - 1;
10535
	} else if (HAS_DDI(dev) &&
10536
		   intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) {
10537
		crtc->scanline_offset = 2;
10538
	} else
10539
		crtc->scanline_offset = 1;
10540
}
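
/*
 * Illustrative values (editorial sketch): a gen2 pipe running a progressive
 * mode with crtc_vtotal = 525 ends up with scanline_offset = 524 (vtotal is
 * halved first for interlaced modes), an HDMI output on a HSW+ DDI port gets
 * 2, and every other platform/output combination gets 1.
 */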
10541
 
3746 Serge 10542
static int __intel_set_mode(struct drm_crtc *crtc,
3031 serge 10543
		    struct drm_display_mode *mode,
10544
		    int x, int y, struct drm_framebuffer *fb)
10545
{
10546
	struct drm_device *dev = crtc->dev;
5060 serge 10547
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 10548
	struct drm_display_mode *saved_mode;
3746 Serge 10549
	struct intel_crtc_config *pipe_config = NULL;
3031 serge 10550
	struct intel_crtc *intel_crtc;
10551
	unsigned disable_pipes, prepare_pipes, modeset_pipes;
3480 Serge 10552
	int ret = 0;
3031 serge 10553
 
4560 Serge 10554
	saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL);
3480 Serge 10555
	if (!saved_mode)
10556
		return -ENOMEM;
10557
 
3031 serge 10558
	intel_modeset_affected_pipes(crtc, &modeset_pipes,
10559
				     &prepare_pipes, &disable_pipes);
10560
 
3480 Serge 10561
	*saved_mode = crtc->mode;
3031 serge 10562
 
10563
	/* Hack: Because we don't (yet) support global modeset on multiple
10564
	 * crtcs, we don't keep track of the new mode for more than one crtc.
10565
	 * Hence simply check whether any bit is set in modeset_pipes in all the
10566
	 * pieces of code that are not yet converted to deal with multiple crtcs
10567
	 * changing their mode at the same time. */
10568
	if (modeset_pipes) {
3746 Serge 10569
		pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
10570
		if (IS_ERR(pipe_config)) {
10571
			ret = PTR_ERR(pipe_config);
10572
			pipe_config = NULL;
10573
 
3480 Serge 10574
			goto out;
3031 serge 10575
		}
4104 Serge 10576
		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
10577
				       "[modeset]");
5060 serge 10578
		to_intel_crtc(crtc)->new_config = pipe_config;
3031 serge 10579
	}
10580
 
4560 Serge 10581
	/*
10582
	 * See if the config requires any additional preparation, e.g.
10583
	 * to adjust global state with pipes off.  We need to do this
10584
	 * here so we can get the modeset_pipe updated config for the new
10585
	 * mode set on this crtc.  For other crtcs we need to use the
10586
	 * adjusted_mode bits in the crtc directly.
10587
	 */
10588
	if (IS_VALLEYVIEW(dev)) {
5060 serge 10589
		valleyview_modeset_global_pipes(dev, &prepare_pipes);
4560 Serge 10590
 
10591
		/* may have added more to prepare_pipes than we should */
10592
		prepare_pipes &= ~disable_pipes;
10593
	}
10594
 
3746 Serge 10595
	for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
10596
		intel_crtc_disable(&intel_crtc->base);
10597
 
3031 serge 10598
	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
10599
		if (intel_crtc->base.enabled)
10600
			dev_priv->display.crtc_disable(&intel_crtc->base);
10601
	}
10602
 
10603
	/* crtc->mode is already used by the ->mode_set callbacks, hence we need
10604
	 * to set it here already despite that we pass it down the callchain.
2330 Serge 10605
	 */
3746 Serge 10606
	if (modeset_pipes) {
3031 serge 10607
		crtc->mode = *mode;
3746 Serge 10608
		/* mode_set/enable/disable functions rely on a correct pipe
10609
		 * config. */
10610
		to_intel_crtc(crtc)->config = *pipe_config;
5060 serge 10611
		to_intel_crtc(crtc)->new_config = &to_intel_crtc(crtc)->config;
4560 Serge 10612
 
10613
		/*
10614
		 * Calculate and store various constants which
10615
		 * are later needed by vblank and swap-completion
10616
		 * timestamping. They are derived from true hwmode.
10617
		 */
10618
		drm_calc_timestamping_constants(crtc,
10619
						&pipe_config->adjusted_mode);
3746 Serge 10620
	}
2327 Serge 10621
 
3031 serge 10622
	/* Only after disabling all output pipelines that will be changed can we
10623
	 * update the output configuration. */
10624
	intel_modeset_update_state(dev, prepare_pipes);
10625
 
3243 Serge 10626
	if (dev_priv->display.modeset_global_resources)
10627
		dev_priv->display.modeset_global_resources(dev);
10628
 
3031 serge 10629
	/* Set up the DPLL and any encoders state that needs to adjust or depend
10630
	 * on the DPLL.
2330 Serge 10631
	 */
3031 serge 10632
	for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
5060 serge 10633
		struct drm_framebuffer *old_fb = crtc->primary->fb;
10634
		struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
10635
		struct drm_i915_gem_object *obj = intel_fb_obj(fb);
10636
 
10637
		mutex_lock(&dev->struct_mutex);
10638
		ret = intel_pin_and_fence_fb_obj(dev,
10639
						 obj,
10640
						 NULL);
10641
		if (ret != 0) {
10642
			DRM_ERROR("pin & fence failed\n");
10643
			mutex_unlock(&dev->struct_mutex);
10644
			goto done;
10645
		}
10646
		if (old_fb)
10647
			intel_unpin_fb_obj(old_obj);
10648
		i915_gem_track_fb(old_obj, obj,
10649
				  INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
10650
		mutex_unlock(&dev->struct_mutex);
10651
 
10652
		crtc->primary->fb = fb;
10653
		crtc->x = x;
10654
		crtc->y = y;
10655
 
10656
		ret = dev_priv->display.crtc_mode_set(&intel_crtc->base,
3031 serge 10657
					   x, y, fb);
3480 Serge 10658
		if (ret)
3031 serge 10659
		    goto done;
10660
	}
10661
 
10662
	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
5060 serge 10663
	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
10664
		update_scanline_offset(intel_crtc);
10665
 
3031 serge 10666
		dev_priv->display.crtc_enable(&intel_crtc->base);
5060 serge 10667
	}
3031 serge 10668
 
10669
	/* FIXME: add subpixel order */
10670
done:
4560 Serge 10671
	if (ret && crtc->enabled)
3480 Serge 10672
		crtc->mode = *saved_mode;
3031 serge 10673
 
3480 Serge 10674
out:
3746 Serge 10675
	kfree(pipe_config);
3480 Serge 10676
	kfree(saved_mode);
3031 serge 10677
	return ret;
2330 Serge 10678
}
2327 Serge 10679
 
4104 Serge 10680
static int intel_set_mode(struct drm_crtc *crtc,
3746 Serge 10681
		     struct drm_display_mode *mode,
10682
		     int x, int y, struct drm_framebuffer *fb)
10683
{
10684
	int ret;
10685
 
10686
	ret = __intel_set_mode(crtc, mode, x, y, fb);
10687
 
10688
	if (ret == 0)
10689
		intel_modeset_check_state(crtc->dev);
10690
 
10691
	return ret;
10692
}
10693
 
3480 Serge 10694
void intel_crtc_restore_mode(struct drm_crtc *crtc)
10695
{
5060 serge 10696
	intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb);
3480 Serge 10697
}
10698
 
3031 serge 10699
#undef for_each_intel_crtc_masked
2327 Serge 10700
 
3031 serge 10701
static void intel_set_config_free(struct intel_set_config *config)
10702
{
10703
	if (!config)
10704
		return;
10705
 
10706
	kfree(config->save_connector_encoders);
10707
	kfree(config->save_encoder_crtcs);
5060 serge 10708
	kfree(config->save_crtc_enabled);
3031 serge 10709
	kfree(config);
10710
}
10711
 
10712
static int intel_set_config_save_state(struct drm_device *dev,
10713
				       struct intel_set_config *config)
10714
{
5060 serge 10715
	struct drm_crtc *crtc;
3031 serge 10716
	struct drm_encoder *encoder;
10717
	struct drm_connector *connector;
10718
	int count;
10719
 
5060 serge 10720
	config->save_crtc_enabled =
10721
		kcalloc(dev->mode_config.num_crtc,
10722
			sizeof(bool), GFP_KERNEL);
10723
	if (!config->save_crtc_enabled)
10724
		return -ENOMEM;
10725
 
3031 serge 10726
	config->save_encoder_crtcs =
10727
		kcalloc(dev->mode_config.num_encoder,
10728
			sizeof(struct drm_crtc *), GFP_KERNEL);
10729
	if (!config->save_encoder_crtcs)
10730
		return -ENOMEM;
10731
 
10732
	config->save_connector_encoders =
10733
		kcalloc(dev->mode_config.num_connector,
10734
			sizeof(struct drm_encoder *), GFP_KERNEL);
10735
	if (!config->save_connector_encoders)
10736
		return -ENOMEM;
10737
 
10738
	/* Copy data. Note that driver private data is not affected.
10739
	 * Should anything bad happen only the expected state is
10740
	 * restored, not the driver's personal bookkeeping.
10741
	 */
10742
	count = 0;
5060 serge 10743
	for_each_crtc(dev, crtc) {
10744
		config->save_crtc_enabled[count++] = crtc->enabled;
10745
	}
10746
 
10747
	count = 0;
3031 serge 10748
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
10749
		config->save_encoder_crtcs[count++] = encoder->crtc;
10750
	}
10751
 
10752
	count = 0;
10753
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
10754
		config->save_connector_encoders[count++] = connector->encoder;
10755
	}
10756
 
10757
	return 0;
10758
}
10759
 
10760
static void intel_set_config_restore_state(struct drm_device *dev,
10761
					   struct intel_set_config *config)
10762
{
5060 serge 10763
	struct intel_crtc *crtc;
3031 serge 10764
	struct intel_encoder *encoder;
10765
	struct intel_connector *connector;
10766
	int count;
10767
 
10768
	count = 0;
5060 serge 10769
	for_each_intel_crtc(dev, crtc) {
10770
		crtc->new_enabled = config->save_crtc_enabled[count++];
10771
 
10772
		if (crtc->new_enabled)
10773
			crtc->new_config = &crtc->config;
10774
		else
10775
			crtc->new_config = NULL;
10776
	}
10777
 
10778
	count = 0;
3031 serge 10779
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
10780
		encoder->new_crtc =
10781
			to_intel_crtc(config->save_encoder_crtcs[count++]);
10782
	}
10783
 
10784
	count = 0;
10785
	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
10786
		connector->new_encoder =
10787
			to_intel_encoder(config->save_connector_encoders[count++]);
10788
	}
10789
}
10790
 
3746 Serge 10791
static bool
4104 Serge 10792
is_crtc_connector_off(struct drm_mode_set *set)
3746 Serge 10793
{
10794
	int i;
10795
 
4104 Serge 10796
	if (set->num_connectors == 0)
10797
		return false;
10798
 
10799
	if (WARN_ON(set->connectors == NULL))
10800
		return false;
10801
 
10802
	for (i = 0; i < set->num_connectors; i++)
10803
		if (set->connectors[i]->encoder &&
10804
		    set->connectors[i]->encoder->crtc == set->crtc &&
10805
		    set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
3746 Serge 10806
			return true;
10807
 
10808
	return false;
10809
}
10810
 
3031 serge 10811
static void
10812
intel_set_config_compute_mode_changes(struct drm_mode_set *set,
10813
				      struct intel_set_config *config)
10814
{
10815
 
10816
	/* We should be able to check here if the fb has the same properties
10817
	 * and then just flip_or_move it */
4104 Serge 10818
	if (is_crtc_connector_off(set)) {
3746 Serge 10819
		config->mode_changed = true;
5060 serge 10820
	} else if (set->crtc->primary->fb != set->fb) {
10821
		/*
10822
		 * If we have no fb, we can only flip as long as the crtc is
10823
		 * active, otherwise we need a full mode set.  The crtc may
10824
		 * be active if we've only disabled the primary plane, or
10825
		 * in fastboot situations.
10826
		 */
10827
		if (set->crtc->primary->fb == NULL) {
4104 Serge 10828
			struct intel_crtc *intel_crtc =
10829
				to_intel_crtc(set->crtc);
10830
 
5060 serge 10831
			if (intel_crtc->active) {
4104 Serge 10832
				DRM_DEBUG_KMS("crtc has no fb, will flip\n");
10833
				config->fb_changed = true;
10834
			} else {
10835
				DRM_DEBUG_KMS("inactive crtc, full mode set\n");
3031 serge 10836
				config->mode_changed = true;
4104 Serge 10837
			}
3031 serge 10838
		} else if (set->fb == NULL) {
10839
			config->mode_changed = true;
3746 Serge 10840
		} else if (set->fb->pixel_format !=
5060 serge 10841
			   set->crtc->primary->fb->pixel_format) {
3031 serge 10842
			config->mode_changed = true;
3746 Serge 10843
		} else {
3031 serge 10844
			config->fb_changed = true;
10845
		}
3746 Serge 10846
	}
3031 serge 10847
 
10848
	if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
10849
		config->fb_changed = true;
10850
 
10851
	if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
10852
		DRM_DEBUG_KMS("modes are different, full mode set\n");
10853
		drm_mode_debug_printmodeline(&set->crtc->mode);
10854
		drm_mode_debug_printmodeline(set->mode);
10855
		config->mode_changed = true;
10856
	}
4104 Serge 10857
 
10858
	DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
10859
			set->crtc->base.id, config->mode_changed, config->fb_changed);
3031 serge 10860
}
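
/*
 * Worked examples (editorial sketch): moving the same fb to a new x/y offset
 * only sets fb_changed, so the request can typically be serviced without a
 * full mode set; switching to an fb with a different pixel format, dropping
 * the fb entirely, or passing a mode that differs from the current one all
 * set mode_changed and force a full mode set.
 */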
10861
 
10862
static int
10863
intel_modeset_stage_output_state(struct drm_device *dev,
10864
				 struct drm_mode_set *set,
10865
				 struct intel_set_config *config)
10866
{
10867
	struct intel_connector *connector;
10868
	struct intel_encoder *encoder;
5060 serge 10869
	struct intel_crtc *crtc;
4104 Serge 10870
	int ro;
3031 serge 10871
 
3480 Serge 10872
	/* The upper layers ensure that we either disable a crtc or have a list
3031 serge 10873
	 * of connectors. For paranoia, double-check this. */
10874
	WARN_ON(!set->fb && (set->num_connectors != 0));
10875
	WARN_ON(set->fb && (set->num_connectors == 0));
10876
 
10877
	list_for_each_entry(connector, &dev->mode_config.connector_list,
10878
			    base.head) {
10879
		/* Otherwise traverse passed in connector list and get encoders
10880
		 * for them. */
10881
		for (ro = 0; ro < set->num_connectors; ro++) {
10882
			if (set->connectors[ro] == &connector->base) {
5060 serge 10883
				connector->new_encoder = intel_find_encoder(connector, to_intel_crtc(set->crtc)->pipe);
3031 serge 10884
				break;
10885
			}
10886
		}
10887
 
10888
		/* If we disable the crtc, disable all its connectors. Also, if
10889
		 * the connector is on the changing crtc but not on the new
10890
		 * connector list, disable it. */
10891
		if ((!set->fb || ro == set->num_connectors) &&
10892
		    connector->base.encoder &&
10893
		    connector->base.encoder->crtc == set->crtc) {
10894
			connector->new_encoder = NULL;
10895
 
10896
			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
10897
				connector->base.base.id,
5060 serge 10898
				connector->base.name);
3031 serge 10899
		}
10900
 
10901
 
10902
		if (&connector->new_encoder->base != connector->base.encoder) {
10903
			DRM_DEBUG_KMS("encoder changed, full mode switch\n");
10904
			config->mode_changed = true;
10905
		}
10906
	}
10907
	/* connector->new_encoder is now updated for all connectors. */
10908
 
10909
	/* Update crtc of enabled connectors. */
10910
	list_for_each_entry(connector, &dev->mode_config.connector_list,
10911
			    base.head) {
5060 serge 10912
		struct drm_crtc *new_crtc;
10913
 
3031 serge 10914
		if (!connector->new_encoder)
10915
			continue;
10916
 
10917
		new_crtc = connector->new_encoder->base.crtc;
10918
 
10919
		for (ro = 0; ro < set->num_connectors; ro++) {
10920
			if (set->connectors[ro] == &connector->base)
10921
				new_crtc = set->crtc;
10922
		}
10923
 
10924
		/* Make sure the new CRTC will work with the encoder */
4560 Serge 10925
		if (!drm_encoder_crtc_ok(&connector->new_encoder->base,
3031 serge 10926
					   new_crtc)) {
10927
			return -EINVAL;
10928
		}
5060 serge 10929
		connector->new_encoder->new_crtc = to_intel_crtc(new_crtc);
3031 serge 10930
 
10931
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
10932
			connector->base.base.id,
5060 serge 10933
			connector->base.name,
3031 serge 10934
			new_crtc->base.id);
10935
	}
10936
 
10937
	/* Check for any encoders that needs to be disabled. */
10938
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
10939
			    base.head) {
4560 Serge 10940
		int num_connectors = 0;
3031 serge 10941
		list_for_each_entry(connector,
10942
				    &dev->mode_config.connector_list,
10943
				    base.head) {
10944
			if (connector->new_encoder == encoder) {
10945
				WARN_ON(!connector->new_encoder->new_crtc);
4560 Serge 10946
				num_connectors++;
3031 serge 10947
			}
10948
		}
4560 Serge 10949
 
10950
		if (num_connectors == 0)
3031 serge 10951
			encoder->new_crtc = NULL;
4560 Serge 10952
		else if (num_connectors > 1)
10953
			return -EINVAL;
10954
 
3031 serge 10955
		/* Only now check for crtc changes so we don't miss encoders
10956
		 * that will be disabled. */
10957
		if (&encoder->new_crtc->base != encoder->base.crtc) {
10958
			DRM_DEBUG_KMS("crtc changed, full mode switch\n");
10959
			config->mode_changed = true;
10960
		}
10961
	}
10962
	/* Now we've also updated encoder->new_crtc for all encoders. */
5060 serge 10963
	list_for_each_entry(connector, &dev->mode_config.connector_list,
10964
			    base.head) {
10965
		if (connector->new_encoder)
10966
			if (connector->new_encoder != connector->encoder)
10967
				connector->encoder = connector->new_encoder;
10968
	}
10969
	for_each_intel_crtc(dev, crtc) {
10970
		crtc->new_enabled = false;
3031 serge 10971
 
5060 serge 10972
		list_for_each_entry(encoder,
10973
				    &dev->mode_config.encoder_list,
10974
				    base.head) {
10975
			if (encoder->new_crtc == crtc) {
10976
				crtc->new_enabled = true;
10977
				break;
10978
			}
10979
		}
10980
 
10981
		if (crtc->new_enabled != crtc->base.enabled) {
10982
			DRM_DEBUG_KMS("crtc %sabled, full mode switch\n",
10983
				      crtc->new_enabled ? "en" : "dis");
10984
			config->mode_changed = true;
10985
		}
10986
 
10987
		if (crtc->new_enabled)
10988
			crtc->new_config = &crtc->config;
10989
		else
10990
			crtc->new_config = NULL;
10991
	}
10992
 
3031 serge 10993
	return 0;
10994
}
10995
 
5060 serge 10996
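/*
 * Error-path helper: when restoring state for a pipe that is enabled but has
 * no framebuffer, drop every staged connector and encoder link pointing at
 * this CRTC and mark it disabled, so the restore does not try to light up a
 * pipe with fb == NULL.
 */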
static void disable_crtc_nofb(struct intel_crtc *crtc)
10997
{
10998
	struct drm_device *dev = crtc->base.dev;
10999
	struct intel_encoder *encoder;
11000
	struct intel_connector *connector;
11001
 
11002
	DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n",
11003
		      pipe_name(crtc->pipe));
11004
 
11005
	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
11006
		if (connector->new_encoder &&
11007
		    connector->new_encoder->new_crtc == crtc)
11008
			connector->new_encoder = NULL;
11009
	}
11010
 
11011
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
11012
		if (encoder->new_crtc == crtc)
11013
			encoder->new_crtc = NULL;
11014
	}
11015
 
11016
	crtc->new_enabled = false;
11017
	crtc->new_config = NULL;
11018
}
11019
 
3031 serge 11020
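/*
 * drm_crtc_funcs.set_config entry point.  Saves the current configuration,
 * computes whether the request needs a full mode set or only a framebuffer
 * base update, stages the new output state and applies it; on failure the
 * saved configuration is restored as far as possible.
 */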
static int intel_crtc_set_config(struct drm_mode_set *set)
11021
{
11022
	struct drm_device *dev;
11023
	struct drm_mode_set save_set;
11024
	struct intel_set_config *config;
11025
	int ret;
11026
 
11027
	BUG_ON(!set);
11028
	BUG_ON(!set->crtc);
11029
	BUG_ON(!set->crtc->helper_private);
11030
 
3480 Serge 11031
	/* Enforce sane interface api - has been abused by the fb helper. */
11032
	BUG_ON(!set->mode && set->fb);
11033
	BUG_ON(set->fb && set->num_connectors == 0);
3031 serge 11034
 
11035
	if (set->fb) {
11036
		DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
11037
				set->crtc->base.id, set->fb->base.id,
11038
				(int)set->num_connectors, set->x, set->y);
11039
	} else {
11040
		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
11041
	}
11042
 
11043
	dev = set->crtc->dev;
11044
 
11045
	ret = -ENOMEM;
11046
	config = kzalloc(sizeof(*config), GFP_KERNEL);
11047
	if (!config)
11048
		goto out_config;
11049
 
11050
	ret = intel_set_config_save_state(dev, config);
11051
	if (ret)
11052
		goto out_config;
11053
 
11054
	save_set.crtc = set->crtc;
11055
	save_set.mode = &set->crtc->mode;
11056
	save_set.x = set->crtc->x;
11057
	save_set.y = set->crtc->y;
5060 serge 11058
	save_set.fb = set->crtc->primary->fb;
3031 serge 11059
 
11060
	/* Compute whether we need a full modeset, only an fb base update or no
11061
	 * change at all. In the future we might also check whether only the
11062
	 * mode changed, e.g. for LVDS where we only change the panel fitter in
11063
	 * such cases. */
11064
	intel_set_config_compute_mode_changes(set, config);
11065
 
11066
	ret = intel_modeset_stage_output_state(dev, set, config);
11067
	if (ret)
11068
		goto fail;
11069
 
11070
	if (config->mode_changed) {
3480 Serge 11071
		ret = intel_set_mode(set->crtc, set->mode,
11072
				     set->x, set->y, set->fb);
3031 serge 11073
	} else if (config->fb_changed) {
5060 serge 11074
		struct drm_i915_private *dev_priv = dev->dev_private;
11075
		struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
3746 Serge 11076
 
5060 serge 11077
 
3031 serge 11078
		ret = intel_pipe_set_base(set->crtc,
11079
					  set->x, set->y, set->fb);
5060 serge 11080
 
4560 Serge 11081
		/*
5060 serge 11082
		 * We need to make sure the primary plane is re-enabled if it
11083
		 * has previously been turned off.
11084
		 */
11085
		if (!intel_crtc->primary_enabled && ret == 0) {
11086
			WARN_ON(!intel_crtc->active);
11087
			intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
11088
						      intel_crtc->pipe);
11089
		}
11090
 
11091
		/*
4560 Serge 11092
		 * In the fastboot case this may be our only check of the
11093
		 * state after boot.  It would be better to only do it on
11094
		 * the first update, but we don't have a nice way of doing that
11095
		 * (and really, set_config isn't used much for high freq page
11096
		 * flipping, so increasing its cost here shouldn't be a big
11097
		 * deal).
11098
		 */
5060 serge 11099
		if (i915.fastboot && ret == 0)
4560 Serge 11100
			intel_modeset_check_state(set->crtc->dev);
3031 serge 11101
	}
11102
 
3746 Serge 11103
	if (ret) {
4104 Serge 11104
		DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
3746 Serge 11105
			  set->crtc->base.id, ret);
3031 serge 11106
fail:
11107
	intel_set_config_restore_state(dev, config);
11108
 
5060 serge 11109
		/*
11110
		 * HACK: if the pipe was on, but we didn't have a framebuffer,
11111
		 * force the pipe off to avoid oopsing in the modeset code
11112
		 * due to fb==NULL. This should only happen during boot since
11113
		 * we don't yet reconstruct the FB from the hardware state.
11114
		 */
11115
		if (to_intel_crtc(save_set.crtc)->new_enabled && !save_set.fb)
11116
			disable_crtc_nofb(to_intel_crtc(save_set.crtc));
11117
 
3031 serge 11118
	/* Try to restore the config */
11119
	if (config->mode_changed &&
3480 Serge 11120
	    intel_set_mode(save_set.crtc, save_set.mode,
3031 serge 11121
			    save_set.x, save_set.y, save_set.fb))
11122
		DRM_ERROR("failed to restore config after modeset failure\n");
3746 Serge 11123
	}
3031 serge 11124
 
11125
out_config:
11126
	intel_set_config_free(config);
11127
	return ret;
11128
}
11129
 
2330 Serge 11130
static const struct drm_crtc_funcs intel_crtc_funcs = {
11131
	.gamma_set = intel_crtc_gamma_set,
3031 serge 11132
	.set_config = intel_crtc_set_config,
2330 Serge 11133
	.destroy = intel_crtc_destroy,
11134
//	.page_flip = intel_crtc_page_flip,
11135
};
2327 Serge 11136
 
4104 Serge 11137
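/*
 * Shared DPLL callbacks for the IBX/CPT PCH: read back the PLL and FP
 * divider state, program the dividers, and enable/disable the PLL with the
 * required settle delays (the pixel multiplier only takes effect once the
 * clocks are stable, hence the second DPLL write in the enable hook).
 */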
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
11138
				      struct intel_shared_dpll *pll,
11139
				      struct intel_dpll_hw_state *hw_state)
3031 serge 11140
{
4104 Serge 11141
	uint32_t val;
3031 serge 11142
 
5060 serge 11143
	if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
11144
		return false;
11145
 
4104 Serge 11146
	val = I915_READ(PCH_DPLL(pll->id));
11147
	hw_state->dpll = val;
11148
	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
11149
	hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
11150
 
11151
	return val & DPLL_VCO_ENABLE;
11152
}
11153
 
11154
static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
11155
				  struct intel_shared_dpll *pll)
11156
{
11157
	I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0);
11158
	I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1);
11159
}
11160
 
11161
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
11162
				struct intel_shared_dpll *pll)
11163
{
11164
	/* PCH refclock must be enabled first */
4560 Serge 11165
	ibx_assert_pch_refclk_enabled(dev_priv);
4104 Serge 11166
 
11167
	I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
11168
 
11169
	/* Wait for the clocks to stabilize. */
11170
	POSTING_READ(PCH_DPLL(pll->id));
11171
	udelay(150);
11172
 
11173
	/* The pixel multiplier can only be updated once the
11174
	 * DPLL is enabled and the clocks are stable.
11175
	 *
11176
	 * So write it again.
11177
	 */
11178
	I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
11179
	POSTING_READ(PCH_DPLL(pll->id));
11180
	udelay(200);
11181
}
11182
 
11183
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
11184
				 struct intel_shared_dpll *pll)
11185
{
11186
	struct drm_device *dev = dev_priv->dev;
11187
	struct intel_crtc *crtc;
11188
 
11189
	/* Make sure no transcoder is still depending on us. */
5060 serge 11190
	for_each_intel_crtc(dev, crtc) {
4104 Serge 11191
		if (intel_crtc_to_shared_dpll(crtc) == pll)
11192
			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
3031 serge 11193
	}
11194
 
4104 Serge 11195
	I915_WRITE(PCH_DPLL(pll->id), 0);
11196
	POSTING_READ(PCH_DPLL(pll->id));
11197
	udelay(200);
11198
}
11199
 
11200
static char *ibx_pch_dpll_names[] = {
11201
	"PCH DPLL A",
11202
	"PCH DPLL B",
11203
};
11204
 
11205
static void ibx_pch_dpll_init(struct drm_device *dev)
11206
{
11207
	struct drm_i915_private *dev_priv = dev->dev_private;
11208
	int i;
11209
 
11210
	dev_priv->num_shared_dpll = 2;
11211
 
11212
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
11213
		dev_priv->shared_dplls[i].id = i;
11214
		dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
11215
		dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
11216
		dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
11217
		dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
11218
		dev_priv->shared_dplls[i].get_hw_state =
11219
			ibx_pch_dpll_get_hw_state;
3031 serge 11220
	}
11221
}
11222
 
4104 Serge 11223
static void intel_shared_dpll_init(struct drm_device *dev)
11224
{
11225
	struct drm_i915_private *dev_priv = dev->dev_private;
11226
 
5060 serge 11227
	if (HAS_DDI(dev))
11228
		intel_ddi_pll_init(dev);
11229
	else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
4104 Serge 11230
		ibx_pch_dpll_init(dev);
11231
	else
11232
		dev_priv->num_shared_dpll = 0;
11233
 
11234
	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
11235
}
11236
 
5060 serge 11237
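/*
 * drm_plane_funcs.disable_plane for the primary plane.  If the hardware
 * plane was never actually enabled (clipped away, fully covered by a sprite,
 * or the CRTC is still off) we only unpin the framebuffer and clear
 * plane->fb; otherwise the hardware plane is disabled first.
 */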
static int
11238
intel_primary_plane_disable(struct drm_plane *plane)
11239
{
11240
	struct drm_device *dev = plane->dev;
11241
	struct drm_i915_private *dev_priv = dev->dev_private;
11242
	struct intel_plane *intel_plane = to_intel_plane(plane);
11243
	struct intel_crtc *intel_crtc;
11244
 
11245
	if (!plane->fb)
11246
		return 0;
11247
 
11248
	BUG_ON(!plane->crtc);
11249
 
11250
	intel_crtc = to_intel_crtc(plane->crtc);
11251
 
11252
	/*
11253
	 * Even though we checked plane->fb above, it's still possible that
11254
	 * the primary plane has been implicitly disabled because the crtc
11255
	 * coordinates given weren't visible, or because we detected
11256
	 * that it was 100% covered by a sprite plane.  Or, the CRTC may be
11257
	 * off and we've set a fb, but haven't actually turned on the CRTC yet.
11258
	 * In either case, we need to unpin the FB and let the fb pointer get
11259
	 * updated, but otherwise we don't need to touch the hardware.
11260
	 */
11261
	if (!intel_crtc->primary_enabled)
11262
		goto disable_unpin;
11263
 
11264
	intel_disable_primary_hw_plane(dev_priv, intel_plane->plane,
11265
				       intel_plane->pipe);
11266
disable_unpin:
11267
	mutex_lock(&dev->struct_mutex);
11268
	i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
11269
			  INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
11270
	intel_unpin_fb_obj(intel_fb_obj(plane->fb));
11271
	mutex_unlock(&dev->struct_mutex);
11272
	plane->fb = NULL;
11273
 
11274
	return 0;
11275
}
11276
 
11277
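/*
 * drm_plane_funcs.update_plane for the primary plane.  The primary cannot be
 * scaled, so after the helper has validated the request this either just
 * pins the framebuffer when the CRTC is disabled, turns the hardware plane
 * off when clipping leaves nothing visible, or performs a regular
 * intel_pipe_set_base() and re-enables the plane if necessary.
 */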
static int
11278
intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
11279
			     struct drm_framebuffer *fb, int crtc_x, int crtc_y,
11280
			     unsigned int crtc_w, unsigned int crtc_h,
11281
			     uint32_t src_x, uint32_t src_y,
11282
			     uint32_t src_w, uint32_t src_h)
11283
{
11284
	struct drm_device *dev = crtc->dev;
11285
	struct drm_i915_private *dev_priv = dev->dev_private;
11286
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11287
	struct intel_plane *intel_plane = to_intel_plane(plane);
11288
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11289
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
11290
	struct drm_rect dest = {
11291
		/* integer pixels */
11292
		.x1 = crtc_x,
11293
		.y1 = crtc_y,
11294
		.x2 = crtc_x + crtc_w,
11295
		.y2 = crtc_y + crtc_h,
11296
	};
11297
	struct drm_rect src = {
11298
		/* 16.16 fixed point */
11299
		.x1 = src_x,
11300
		.y1 = src_y,
11301
		.x2 = src_x + src_w,
11302
		.y2 = src_y + src_h,
11303
	};
11304
	const struct drm_rect clip = {
11305
		/* integer pixels */
11306
		.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
11307
		.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
11308
	};
11309
	bool visible;
11310
	int ret;
11311
 
11312
	ret = drm_plane_helper_check_update(plane, crtc, fb,
11313
					    &src, &dest, &clip,
11314
					    DRM_PLANE_HELPER_NO_SCALING,
11315
					    DRM_PLANE_HELPER_NO_SCALING,
11316
					    false, true, &visible);
11317
 
11318
	if (ret)
11319
		return ret;
11320
 
11321
	/*
11322
	 * If the CRTC isn't enabled, we're just pinning the framebuffer,
11323
	 * updating the fb pointer, and returning without touching the
11324
	 * hardware.  This allows us to later do a drmModeSetCrtc with fb=-1 to
11325
	 * turn on the display with all planes setup as desired.
11326
	 */
11327
	if (!crtc->enabled) {
11328
		mutex_lock(&dev->struct_mutex);
11329
 
11330
		/*
11331
		 * If we already called setplane while the crtc was disabled,
11332
		 * we may have an fb pinned; unpin it.
11333
		 */
11334
		if (plane->fb)
11335
			intel_unpin_fb_obj(old_obj);
11336
 
11337
		i915_gem_track_fb(old_obj, obj,
11338
				  INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
11339
 
11340
		/* Pin and return without programming hardware */
11341
		ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
11342
		mutex_unlock(&dev->struct_mutex);
11343
 
11344
		return ret;
11345
	}
11346
 
11347
 
11348
	/*
11349
	 * If clipping results in a non-visible primary plane, we'll disable
11350
	 * the primary plane.  Note that this is a bit different than what
11351
	 * happens if userspace explicitly disables the plane by passing fb=0
11352
	 * because plane->fb still gets set and pinned.
11353
	 */
11354
	if (!visible) {
11355
		mutex_lock(&dev->struct_mutex);
11356
 
11357
		/*
11358
		 * Try to pin the new fb first so that we can bail out if we
11359
		 * fail.
11360
		 */
11361
		if (plane->fb != fb) {
11362
			ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
11363
			if (ret) {
11364
				mutex_unlock(&dev->struct_mutex);
11365
				return ret;
11366
			}
11367
		}
11368
 
11369
		i915_gem_track_fb(old_obj, obj,
11370
				  INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
11371
 
11372
		if (intel_crtc->primary_enabled)
11373
			intel_disable_primary_hw_plane(dev_priv,
11374
						       intel_plane->plane,
11375
						       intel_plane->pipe);
11376
 
11377
 
11378
		if (plane->fb != fb)
11379
			if (plane->fb)
11380
				intel_unpin_fb_obj(old_obj);
11381
 
11382
		mutex_unlock(&dev->struct_mutex);
11383
 
11384
		return 0;
11385
	}
11386
 
11387
	ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb);
11388
	if (ret)
11389
		return ret;
11390
 
11391
	if (!intel_crtc->primary_enabled)
11392
		intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
11393
					      intel_crtc->pipe);
11394
 
11395
	return 0;
11396
}
11397
 
11398
/* Common destruction function for both primary and cursor planes */
11399
static void intel_plane_destroy(struct drm_plane *plane)
11400
{
11401
	struct intel_plane *intel_plane = to_intel_plane(plane);
11402
	drm_plane_cleanup(plane);
11403
	kfree(intel_plane);
11404
}
11405
 
11406
static const struct drm_plane_funcs intel_primary_plane_funcs = {
11407
	.update_plane = intel_primary_plane_setplane,
11408
	.disable_plane = intel_primary_plane_disable,
11409
	.destroy = intel_plane_destroy,
11410
};
11411
 
11412
static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
11413
						    int pipe)
11414
{
11415
	struct intel_plane *primary;
11416
	const uint32_t *intel_primary_formats;
11417
	int num_formats;
11418
 
11419
	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
11420
	if (primary == NULL)
11421
		return NULL;
11422
 
11423
	primary->can_scale = false;
11424
	primary->max_downscale = 1;
11425
	primary->pipe = pipe;
11426
	primary->plane = pipe;
11427
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
11428
		primary->plane = !pipe;
11429
 
11430
	if (INTEL_INFO(dev)->gen <= 3) {
11431
		intel_primary_formats = intel_primary_formats_gen2;
11432
		num_formats = ARRAY_SIZE(intel_primary_formats_gen2);
11433
	} else {
11434
		intel_primary_formats = intel_primary_formats_gen4;
11435
		num_formats = ARRAY_SIZE(intel_primary_formats_gen4);
11436
	}
11437
 
11438
	drm_universal_plane_init(dev, &primary->base, 0,
11439
				 &intel_primary_plane_funcs,
11440
				 intel_primary_formats, num_formats,
11441
				 DRM_PLANE_TYPE_PRIMARY);
11442
	return &primary->base;
11443
}
11444
 
11445
static int
11446
intel_cursor_plane_disable(struct drm_plane *plane)
11447
{
11448
	if (!plane->fb)
11449
		return 0;
11450
 
11451
	BUG_ON(!plane->crtc);
11452
 
11453
	return intel_crtc_cursor_set_obj(plane->crtc, NULL, 0, 0);
11454
}
11455
 
11456
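/*
 * drm_plane_funcs.update_plane for the cursor plane.  The cursor position is
 * cached on the CRTC; a new framebuffer goes through
 * intel_crtc_cursor_set_obj(), while a pure move only calls
 * intel_crtc_update_cursor() with the computed visibility.
 */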
static int
11457
intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
11458
			  struct drm_framebuffer *fb, int crtc_x, int crtc_y,
11459
			  unsigned int crtc_w, unsigned int crtc_h,
11460
			  uint32_t src_x, uint32_t src_y,
11461
			  uint32_t src_w, uint32_t src_h)
11462
{
11463
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11464
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
11465
	struct drm_i915_gem_object *obj = intel_fb->obj;
11466
	struct drm_rect dest = {
11467
		/* integer pixels */
11468
		.x1 = crtc_x,
11469
		.y1 = crtc_y,
11470
		.x2 = crtc_x + crtc_w,
11471
		.y2 = crtc_y + crtc_h,
11472
	};
11473
	struct drm_rect src = {
11474
		/* 16.16 fixed point */
11475
		.x1 = src_x,
11476
		.y1 = src_y,
11477
		.x2 = src_x + src_w,
11478
		.y2 = src_y + src_h,
11479
	};
11480
	const struct drm_rect clip = {
11481
		/* integer pixels */
11482
		.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
11483
		.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
11484
	};
11485
	bool visible;
11486
	int ret;
11487
 
11488
	ret = drm_plane_helper_check_update(plane, crtc, fb,
11489
					    &src, &dest, &clip,
11490
					    DRM_PLANE_HELPER_NO_SCALING,
11491
					    DRM_PLANE_HELPER_NO_SCALING,
11492
					    true, true, &visible);
11493
	if (ret)
11494
		return ret;
11495
 
11496
	crtc->cursor_x = crtc_x;
11497
	crtc->cursor_y = crtc_y;
11498
	if (fb != crtc->cursor->fb) {
11499
		return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
11500
	} else {
11501
		intel_crtc_update_cursor(crtc, visible);
11502
		return 0;
11503
	}
11504
}
11505
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
11506
	.update_plane = intel_cursor_plane_update,
11507
	.disable_plane = intel_cursor_plane_disable,
11508
	.destroy = intel_plane_destroy,
11509
};
11510
 
11511
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
11512
						   int pipe)
11513
{
11514
	struct intel_plane *cursor;
11515
 
11516
	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
11517
	if (cursor == NULL)
11518
		return NULL;
11519
 
11520
	cursor->can_scale = false;
11521
	cursor->max_downscale = 1;
11522
	cursor->pipe = pipe;
11523
	cursor->plane = pipe;
11524
 
11525
	drm_universal_plane_init(dev, &cursor->base, 0,
11526
				 &intel_cursor_plane_funcs,
11527
				 intel_cursor_formats,
11528
				 ARRAY_SIZE(intel_cursor_formats),
11529
				 DRM_PLANE_TYPE_CURSOR);
11530
	return &cursor->base;
11531
}
11532
 
2330 Serge 11533
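/*
 * Allocate and register one CRTC together with its primary and cursor
 * planes, set up the default gamma ramp and the pipe/plane mapping (plane A
 * feeds pipe B on gen2/3 with FBC, see below), and hook the CRTC into the
 * pipe_to_crtc_mapping/plane_to_crtc_mapping lookup tables.
 */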
static void intel_crtc_init(struct drm_device *dev, int pipe)
11534
{
5060 serge 11535
	struct drm_i915_private *dev_priv = dev->dev_private;
2330 Serge 11536
	struct intel_crtc *intel_crtc;
5060 serge 11537
	struct drm_plane *primary = NULL;
11538
	struct drm_plane *cursor = NULL;
11539
	int i, ret;
2327 Serge 11540
 
4560 Serge 11541
	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
2330 Serge 11542
	if (intel_crtc == NULL)
11543
		return;
2327 Serge 11544
 
5060 serge 11545
	primary = intel_primary_plane_create(dev, pipe);
11546
	if (!primary)
11547
		goto fail;
2327 Serge 11548
 
5060 serge 11549
	cursor = intel_cursor_plane_create(dev, pipe);
11550
	if (!cursor)
11551
		goto fail;
11552
 
11553
	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
11554
					cursor, &intel_crtc_funcs);
11555
	if (ret)
11556
		goto fail;
11557
 
2330 Serge 11558
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
11559
	for (i = 0; i < 256; i++) {
11560
		intel_crtc->lut_r[i] = i;
11561
		intel_crtc->lut_g[i] = i;
11562
		intel_crtc->lut_b[i] = i;
11563
	}
2327 Serge 11564
 
4560 Serge 11565
	/*
11566
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
5060 serge 11567
	 * are hooked to pipe B. Hence we want plane A feeding pipe B.
4560 Serge 11568
	 */
2330 Serge 11569
	intel_crtc->pipe = pipe;
11570
	intel_crtc->plane = pipe;
4560 Serge 11571
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
2330 Serge 11572
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
11573
		intel_crtc->plane = !pipe;
11574
	}
2327 Serge 11575
 
5060 serge 11576
	intel_crtc->cursor_base = ~0;
11577
	intel_crtc->cursor_cntl = ~0;
11578
 
11579
	init_waitqueue_head(&intel_crtc->vbl_wait);
11580
 
2330 Serge 11581
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
11582
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
11583
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
11584
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
2327 Serge 11585
 
2330 Serge 11586
	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
5060 serge 11587
 
11588
	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
11589
	return;
11590
 
11591
fail:
11592
	if (primary)
11593
		drm_plane_cleanup(primary);
11594
	if (cursor)
11595
		drm_plane_cleanup(cursor);
11596
	kfree(intel_crtc);
2330 Serge 11597
}
2327 Serge 11598
 
4560 Serge 11599
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
11600
{
11601
	struct drm_encoder *encoder = connector->base.encoder;
5060 serge 11602
	struct drm_device *dev = connector->base.dev;
4560 Serge 11603
 
5060 serge 11604
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4560 Serge 11605
 
11606
	if (!encoder)
11607
		return INVALID_PIPE;
11608
 
11609
	return to_intel_crtc(encoder->crtc)->pipe;
11610
}
11611
 
3031 serge 11612
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
11613
				struct drm_file *file)
11614
{
11615
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
5060 serge 11616
	struct drm_crtc *drmmode_crtc;
3031 serge 11617
	struct intel_crtc *crtc;
2327 Serge 11618
 
3482 Serge 11619
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
11620
		return -ENODEV;
11621
 
5060 serge 11622
	drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
2327 Serge 11623
 
5060 serge 11624
	if (!drmmode_crtc) {
3031 serge 11625
		DRM_ERROR("no such CRTC id\n");
4560 Serge 11626
		return -ENOENT;
3031 serge 11627
	}
2327 Serge 11628
 
5060 serge 11629
	crtc = to_intel_crtc(drmmode_crtc);
3031 serge 11630
	pipe_from_crtc_id->pipe = crtc->pipe;
2327 Serge 11631
 
3031 serge 11632
	return 0;
11633
}
2327 Serge 11634
 
3031 serge 11635
static int intel_encoder_clones(struct intel_encoder *encoder)
2330 Serge 11636
{
3031 serge 11637
	struct drm_device *dev = encoder->base.dev;
11638
	struct intel_encoder *source_encoder;
2330 Serge 11639
	int index_mask = 0;
11640
	int entry = 0;
2327 Serge 11641
 
3031 serge 11642
	list_for_each_entry(source_encoder,
11643
			    &dev->mode_config.encoder_list, base.head) {
5060 serge 11644
		if (encoders_cloneable(encoder, source_encoder))
2330 Serge 11645
			index_mask |= (1 << entry);
3031 serge 11646
 
2330 Serge 11647
		entry++;
11648
	}
2327 Serge 11649
 
2330 Serge 11650
	return index_mask;
11651
}
2327 Serge 11652
 
2330 Serge 11653
static bool has_edp_a(struct drm_device *dev)
11654
{
11655
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 11656
 
2330 Serge 11657
	if (!IS_MOBILE(dev))
11658
		return false;
2327 Serge 11659
 
2330 Serge 11660
	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
11661
		return false;
2327 Serge 11662
 
5060 serge 11663
	if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
2330 Serge 11664
		return false;
2327 Serge 11665
 
2330 Serge 11666
	return true;
11667
}
2327 Serge 11668
 
4560 Serge 11669
const char *intel_output_name(int output)
11670
{
11671
	static const char *names[] = {
11672
		[INTEL_OUTPUT_UNUSED] = "Unused",
11673
		[INTEL_OUTPUT_ANALOG] = "Analog",
11674
		[INTEL_OUTPUT_DVO] = "DVO",
11675
		[INTEL_OUTPUT_SDVO] = "SDVO",
11676
		[INTEL_OUTPUT_LVDS] = "LVDS",
11677
		[INTEL_OUTPUT_TVOUT] = "TV",
11678
		[INTEL_OUTPUT_HDMI] = "HDMI",
11679
		[INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort",
11680
		[INTEL_OUTPUT_EDP] = "eDP",
11681
		[INTEL_OUTPUT_DSI] = "DSI",
11682
		[INTEL_OUTPUT_UNKNOWN] = "Unknown",
11683
	};
11684
 
11685
	if (output < 0 || output >= ARRAY_SIZE(names) || !names[output])
11686
		return "Invalid";
11687
 
11688
	return names[output];
11689
}
11690
 
5060 serge 11691
static bool intel_crt_present(struct drm_device *dev)
11692
{
11693
	struct drm_i915_private *dev_priv = dev->dev_private;
11694
 
11695
	if (IS_ULT(dev))
11696
		return false;
11697
 
11698
	if (IS_CHERRYVIEW(dev))
11699
		return false;
11700
 
11701
	if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
11702
		return false;
11703
 
11704
	return true;
11705
}
11706
 
2330 Serge 11707
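/*
 * Probe and register the display outputs for this platform: DDI ports on
 * HSW+, PCH SDVO/HDMI/DP on Ironlake-style systems, the VLV/CHV ports, or
 * the legacy SDVO/HDMI/DP/DVO paths on older generations.  Afterwards the
 * possible_crtcs/possible_clones masks are filled in for every encoder.
 */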
static void intel_setup_outputs(struct drm_device *dev)
11708
{
11709
	struct drm_i915_private *dev_priv = dev->dev_private;
11710
	struct intel_encoder *encoder;
11711
	bool dpd_is_edp = false;
2327 Serge 11712
 
4104 Serge 11713
	intel_lvds_init(dev);
2327 Serge 11714
 
5060 serge 11715
	if (intel_crt_present(dev))
2330 Serge 11716
		intel_crt_init(dev);
2327 Serge 11717
 
3480 Serge 11718
	if (HAS_DDI(dev)) {
2330 Serge 11719
		int found;
2327 Serge 11720
 
3031 serge 11721
		/* Haswell uses DDI functions to detect digital outputs */
11722
		found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
11723
		/* DDI A only supports eDP */
11724
		if (found)
11725
			intel_ddi_init(dev, PORT_A);
11726
 
11727
		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
11728
		 * register */
11729
		found = I915_READ(SFUSE_STRAP);
11730
 
11731
		if (found & SFUSE_STRAP_DDIB_DETECTED)
11732
			intel_ddi_init(dev, PORT_B);
11733
		if (found & SFUSE_STRAP_DDIC_DETECTED)
11734
			intel_ddi_init(dev, PORT_C);
11735
		if (found & SFUSE_STRAP_DDID_DETECTED)
11736
			intel_ddi_init(dev, PORT_D);
11737
	} else if (HAS_PCH_SPLIT(dev)) {
11738
		int found;
4560 Serge 11739
		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
3031 serge 11740
 
3243 Serge 11741
		if (has_edp_a(dev))
11742
			intel_dp_init(dev, DP_A, PORT_A);
11743
 
3746 Serge 11744
		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
2330 Serge 11745
			/* PCH SDVOB multiplex with HDMIB */
3031 serge 11746
			found = intel_sdvo_init(dev, PCH_SDVOB, true);
2330 Serge 11747
			if (!found)
3746 Serge 11748
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
2330 Serge 11749
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
3031 serge 11750
				intel_dp_init(dev, PCH_DP_B, PORT_B);
2330 Serge 11751
		}
2327 Serge 11752
 
3746 Serge 11753
		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
11754
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
2327 Serge 11755
 
3746 Serge 11756
		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
11757
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);
2327 Serge 11758
 
2330 Serge 11759
		if (I915_READ(PCH_DP_C) & DP_DETECTED)
3031 serge 11760
			intel_dp_init(dev, PCH_DP_C, PORT_C);
2327 Serge 11761
 
3243 Serge 11762
		if (I915_READ(PCH_DP_D) & DP_DETECTED)
3031 serge 11763
			intel_dp_init(dev, PCH_DP_D, PORT_D);
11764
	} else if (IS_VALLEYVIEW(dev)) {
4560 Serge 11765
		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
11766
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
11767
					PORT_B);
11768
			if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
11769
				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
11770
		}
11771
 
4104 Serge 11772
		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
11773
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
11774
					PORT_C);
3480 Serge 11775
			if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
4560 Serge 11776
				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
4104 Serge 11777
		}
3243 Serge 11778
 
5060 serge 11779
		if (IS_CHERRYVIEW(dev)) {
11780
			if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED) {
11781
				intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID,
11782
						PORT_D);
11783
				if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED)
11784
					intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D);
11785
			}
11786
		}
11787
 
4560 Serge 11788
		intel_dsi_init(dev);
2330 Serge 11789
	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
11790
		bool found = false;
2327 Serge 11791
 
3746 Serge 11792
		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
2330 Serge 11793
			DRM_DEBUG_KMS("probing SDVOB\n");
3746 Serge 11794
			found = intel_sdvo_init(dev, GEN3_SDVOB, true);
2330 Serge 11795
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
11796
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
3746 Serge 11797
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
2330 Serge 11798
			}
2327 Serge 11799
 
4104 Serge 11800
			if (!found && SUPPORTS_INTEGRATED_DP(dev))
3031 serge 11801
				intel_dp_init(dev, DP_B, PORT_B);
2330 Serge 11802
		}
2327 Serge 11803
 
2330 Serge 11804
		/* Before G4X SDVOC doesn't have its own detect register */
2327 Serge 11805
 
3746 Serge 11806
		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
2330 Serge 11807
			DRM_DEBUG_KMS("probing SDVOC\n");
3746 Serge 11808
			found = intel_sdvo_init(dev, GEN3_SDVOC, false);
2330 Serge 11809
		}
2327 Serge 11810
 
3746 Serge 11811
		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
2327 Serge 11812
 
2330 Serge 11813
			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
11814
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
3746 Serge 11815
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
2330 Serge 11816
			}
4104 Serge 11817
			if (SUPPORTS_INTEGRATED_DP(dev))
3031 serge 11818
				intel_dp_init(dev, DP_C, PORT_C);
2330 Serge 11819
		}
2327 Serge 11820
 
2330 Serge 11821
		if (SUPPORTS_INTEGRATED_DP(dev) &&
4104 Serge 11822
		    (I915_READ(DP_D) & DP_DETECTED))
3031 serge 11823
			intel_dp_init(dev, DP_D, PORT_D);
2330 Serge 11824
	} else if (IS_GEN2(dev))
11825
		intel_dvo_init(dev);
2327 Serge 11826
 
11827
 
5060 serge 11828
	intel_edp_psr_init(dev);
11829
 
2330 Serge 11830
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
11831
		encoder->base.possible_crtcs = encoder->crtc_mask;
11832
		encoder->base.possible_clones =
3031 serge 11833
			intel_encoder_clones(encoder);
2330 Serge 11834
	}
2327 Serge 11835
 
3243 Serge 11836
	intel_init_pch_refclk(dev);
11837
 
11838
	drm_helper_move_panel_connectors_to_head(dev);
2330 Serge 11839
}
11840
 
11841
 
11842
 
2335 Serge 11843
static const struct drm_framebuffer_funcs intel_fb_funcs = {
11844
//	.destroy = intel_user_framebuffer_destroy,
11845
//	.create_handle = intel_user_framebuffer_create_handle,
11846
};
2327 Serge 11847
 
5060 serge 11848
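/*
 * Validate a framebuffer request against the hardware limits (no Y tiling,
 * 64 byte pitch alignment, per-generation pitch caps, supported pixel
 * formats, pitch matching the tiling stride) before wrapping the GEM object
 * in a drm_framebuffer.
 */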
static int intel_framebuffer_init(struct drm_device *dev,
2335 Serge 11849
			   struct intel_framebuffer *intel_fb,
2342 Serge 11850
			   struct drm_mode_fb_cmd2 *mode_cmd,
2335 Serge 11851
			   struct drm_i915_gem_object *obj)
11852
{
5060 serge 11853
	int aligned_height;
4104 Serge 11854
	int pitch_limit;
2335 Serge 11855
	int ret;
2327 Serge 11856
 
4560 Serge 11857
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
11858
 
3243 Serge 11859
	if (obj->tiling_mode == I915_TILING_Y) {
11860
		DRM_DEBUG("hardware does not support tiling Y\n");
2335 Serge 11861
		return -EINVAL;
3243 Serge 11862
	}
2327 Serge 11863
 
3243 Serge 11864
	if (mode_cmd->pitches[0] & 63) {
11865
		DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
11866
			  mode_cmd->pitches[0]);
11867
		return -EINVAL;
11868
	}
11869
 
4104 Serge 11870
	if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) {
11871
		pitch_limit = 32*1024;
11872
	} else if (INTEL_INFO(dev)->gen >= 4) {
11873
		if (obj->tiling_mode)
11874
			pitch_limit = 16*1024;
11875
		else
11876
			pitch_limit = 32*1024;
11877
	} else if (INTEL_INFO(dev)->gen >= 3) {
11878
		if (obj->tiling_mode)
11879
			pitch_limit = 8*1024;
11880
		else
11881
			pitch_limit = 16*1024;
11882
	} else
11883
		/* XXX DSPC is limited to 4k tiled */
11884
		pitch_limit = 8*1024;
11885
 
11886
	if (mode_cmd->pitches[0] > pitch_limit) {
11887
		DRM_DEBUG("%s pitch (%d) must be at less than %d\n",
11888
			  obj->tiling_mode ? "tiled" : "linear",
11889
			  mode_cmd->pitches[0], pitch_limit);
3243 Serge 11890
		return -EINVAL;
11891
	}
11892
 
11893
	if (obj->tiling_mode != I915_TILING_NONE &&
11894
	    mode_cmd->pitches[0] != obj->stride) {
11895
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
11896
			  mode_cmd->pitches[0], obj->stride);
2335 Serge 11897
		return -EINVAL;
3243 Serge 11898
	}
2327 Serge 11899
 
3243 Serge 11900
	/* Reject formats not supported by any plane early. */
2342 Serge 11901
	switch (mode_cmd->pixel_format) {
3243 Serge 11902
	case DRM_FORMAT_C8:
2342 Serge 11903
	case DRM_FORMAT_RGB565:
11904
	case DRM_FORMAT_XRGB8888:
3243 Serge 11905
	case DRM_FORMAT_ARGB8888:
11906
		break;
11907
	case DRM_FORMAT_XRGB1555:
11908
	case DRM_FORMAT_ARGB1555:
11909
		if (INTEL_INFO(dev)->gen > 3) {
4104 Serge 11910
			DRM_DEBUG("unsupported pixel format: %s\n",
11911
				  drm_get_format_name(mode_cmd->pixel_format));
3243 Serge 11912
			return -EINVAL;
11913
		}
11914
		break;
3031 serge 11915
	case DRM_FORMAT_XBGR8888:
3243 Serge 11916
	case DRM_FORMAT_ABGR8888:
2342 Serge 11917
	case DRM_FORMAT_XRGB2101010:
11918
	case DRM_FORMAT_ARGB2101010:
3243 Serge 11919
	case DRM_FORMAT_XBGR2101010:
11920
	case DRM_FORMAT_ABGR2101010:
11921
		if (INTEL_INFO(dev)->gen < 4) {
4104 Serge 11922
			DRM_DEBUG("unsupported pixel format: %s\n",
11923
				  drm_get_format_name(mode_cmd->pixel_format));
3243 Serge 11924
			return -EINVAL;
11925
		}
2335 Serge 11926
		break;
2342 Serge 11927
	case DRM_FORMAT_YUYV:
11928
	case DRM_FORMAT_UYVY:
11929
	case DRM_FORMAT_YVYU:
11930
	case DRM_FORMAT_VYUY:
3243 Serge 11931
		if (INTEL_INFO(dev)->gen < 5) {
4104 Serge 11932
			DRM_DEBUG("unsupported pixel format: %s\n",
11933
				  drm_get_format_name(mode_cmd->pixel_format));
3243 Serge 11934
			return -EINVAL;
11935
		}
2342 Serge 11936
		break;
2335 Serge 11937
	default:
4104 Serge 11938
		DRM_DEBUG("unsupported pixel format: %s\n",
11939
			  drm_get_format_name(mode_cmd->pixel_format));
2335 Serge 11940
		return -EINVAL;
11941
	}
2327 Serge 11942
 
3243 Serge 11943
	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
11944
	if (mode_cmd->offsets[0] != 0)
11945
		return -EINVAL;
11946
 
5060 serge 11947
	aligned_height = intel_align_height(dev, mode_cmd->height,
11948
					    obj->tiling_mode);
4560 Serge 11949
	/* FIXME drm helper for size checks (especially planar formats)? */
11950
	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
11951
		return -EINVAL;
11952
 
3480 Serge 11953
	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
11954
	intel_fb->obj = obj;
4560 Serge 11955
	intel_fb->obj->framebuffer_references++;
3480 Serge 11956
 
2335 Serge 11957
	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
11958
	if (ret) {
11959
		DRM_ERROR("framebuffer init failed %d\n", ret);
11960
		return ret;
11961
	}
2327 Serge 11962
 
2335 Serge 11963
	return 0;
11964
}
2327 Serge 11965
 
4560 Serge 11966
#ifndef CONFIG_DRM_I915_FBDEV
11967
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
11968
{
11969
}
11970
#endif
2327 Serge 11971
 
2360 Serge 11972
static const struct drm_mode_config_funcs intel_mode_funcs = {
4560 Serge 11973
	.fb_create = NULL,
11974
	.output_poll_changed = intel_fbdev_output_poll_changed,
2360 Serge 11975
};
2327 Serge 11976
 
3031 serge 11977
/* Set up chip specific display functions */
11978
static void intel_init_display(struct drm_device *dev)
11979
{
11980
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 11981
 
4104 Serge 11982
	if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
11983
		dev_priv->display.find_dpll = g4x_find_best_dpll;
5060 serge 11984
	else if (IS_CHERRYVIEW(dev))
11985
		dev_priv->display.find_dpll = chv_find_best_dpll;
4104 Serge 11986
	else if (IS_VALLEYVIEW(dev))
11987
		dev_priv->display.find_dpll = vlv_find_best_dpll;
11988
	else if (IS_PINEVIEW(dev))
11989
		dev_priv->display.find_dpll = pnv_find_best_dpll;
11990
	else
11991
		dev_priv->display.find_dpll = i9xx_find_best_dpll;
11992
 
3480 Serge 11993
	if (HAS_DDI(dev)) {
3746 Serge 11994
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
5060 serge 11995
		dev_priv->display.get_plane_config = ironlake_get_plane_config;
3243 Serge 11996
		dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
11997
		dev_priv->display.crtc_enable = haswell_crtc_enable;
11998
		dev_priv->display.crtc_disable = haswell_crtc_disable;
5060 serge 11999
		dev_priv->display.off = ironlake_crtc_off;
12000
		dev_priv->display.update_primary_plane =
12001
			ironlake_update_primary_plane;
3243 Serge 12002
	} else if (HAS_PCH_SPLIT(dev)) {
3746 Serge 12003
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
5060 serge 12004
		dev_priv->display.get_plane_config = ironlake_get_plane_config;
3031 serge 12005
		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
12006
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
12007
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
12008
		dev_priv->display.off = ironlake_crtc_off;
5060 serge 12009
		dev_priv->display.update_primary_plane =
12010
			ironlake_update_primary_plane;
4104 Serge 12011
	} else if (IS_VALLEYVIEW(dev)) {
12012
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
5060 serge 12013
		dev_priv->display.get_plane_config = i9xx_get_plane_config;
4104 Serge 12014
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
12015
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
12016
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
12017
		dev_priv->display.off = i9xx_crtc_off;
5060 serge 12018
		dev_priv->display.update_primary_plane =
12019
			i9xx_update_primary_plane;
3031 serge 12020
	} else {
3746 Serge 12021
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
5060 serge 12022
		dev_priv->display.get_plane_config = i9xx_get_plane_config;
3031 serge 12023
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
12024
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
12025
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
12026
		dev_priv->display.off = i9xx_crtc_off;
5060 serge 12027
		dev_priv->display.update_primary_plane =
12028
			i9xx_update_primary_plane;
3031 serge 12029
	}
2327 Serge 12030
 
3031 serge 12031
	/* Returns the core display clock speed */
12032
	if (IS_VALLEYVIEW(dev))
12033
		dev_priv->display.get_display_clock_speed =
12034
			valleyview_get_display_clock_speed;
12035
	else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
12036
		dev_priv->display.get_display_clock_speed =
12037
			i945_get_display_clock_speed;
12038
	else if (IS_I915G(dev))
12039
		dev_priv->display.get_display_clock_speed =
12040
			i915_get_display_clock_speed;
4104 Serge 12041
	else if (IS_I945GM(dev) || IS_845G(dev))
3031 serge 12042
		dev_priv->display.get_display_clock_speed =
12043
			i9xx_misc_get_display_clock_speed;
4104 Serge 12044
	else if (IS_PINEVIEW(dev))
12045
		dev_priv->display.get_display_clock_speed =
12046
			pnv_get_display_clock_speed;
3031 serge 12047
	else if (IS_I915GM(dev))
12048
		dev_priv->display.get_display_clock_speed =
12049
			i915gm_get_display_clock_speed;
12050
	else if (IS_I865G(dev))
12051
		dev_priv->display.get_display_clock_speed =
12052
			i865_get_display_clock_speed;
12053
	else if (IS_I85X(dev))
12054
		dev_priv->display.get_display_clock_speed =
12055
			i855_get_display_clock_speed;
12056
	else /* 852, 830 */
12057
		dev_priv->display.get_display_clock_speed =
12058
			i830_get_display_clock_speed;
2327 Serge 12059
 
3031 serge 12060
	if (HAS_PCH_SPLIT(dev)) {
12061
		if (IS_GEN5(dev)) {
12062
			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
12063
			dev_priv->display.write_eld = ironlake_write_eld;
12064
		} else if (IS_GEN6(dev)) {
12065
			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
12066
			dev_priv->display.write_eld = ironlake_write_eld;
5060 serge 12067
			dev_priv->display.modeset_global_resources =
12068
				snb_modeset_global_resources;
3031 serge 12069
		} else if (IS_IVYBRIDGE(dev)) {
12070
			/* FIXME: detect B0+ stepping and use auto training */
12071
			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
12072
			dev_priv->display.write_eld = ironlake_write_eld;
3243 Serge 12073
			dev_priv->display.modeset_global_resources =
12074
				ivb_modeset_global_resources;
4560 Serge 12075
		} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
3031 serge 12076
			dev_priv->display.fdi_link_train = hsw_fdi_link_train;
12077
			dev_priv->display.write_eld = haswell_write_eld;
3480 Serge 12078
			dev_priv->display.modeset_global_resources =
12079
				haswell_modeset_global_resources;
12080
		}
3031 serge 12081
	} else if (IS_G4X(dev)) {
12082
		dev_priv->display.write_eld = g4x_write_eld;
4560 Serge 12083
	} else if (IS_VALLEYVIEW(dev)) {
12084
		dev_priv->display.modeset_global_resources =
12085
			valleyview_modeset_global_resources;
12086
		dev_priv->display.write_eld = ironlake_write_eld;
3031 serge 12087
	}
2327 Serge 12088
 
3031 serge 12089
	/* Default just returns -ENODEV to indicate unsupported */
12090
//	dev_priv->display.queue_flip = intel_default_queue_flip;
2327 Serge 12091
 
12092
 
12093
 
12094
 
4560 Serge 12095
	intel_panel_init_backlight_funcs(dev);
3031 serge 12096
}
12097
 
12098
/*
12099
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
12100
 * resume, or other times.  This quirk makes sure that's the case for
12101
 * affected systems.
12102
 */
12103
static void quirk_pipea_force(struct drm_device *dev)
2330 Serge 12104
{
12105
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 12106
 
3031 serge 12107
	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
12108
	DRM_INFO("applying pipe a force quirk\n");
12109
}
2327 Serge 12110
 
3031 serge 12111
/*
12112
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
12113
 */
12114
static void quirk_ssc_force_disable(struct drm_device *dev)
12115
{
12116
	struct drm_i915_private *dev_priv = dev->dev_private;
12117
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
12118
	DRM_INFO("applying lvds SSC disable quirk\n");
2330 Serge 12119
}
2327 Serge 12120
 
3031 serge 12121
/*
12122
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
12123
 * brightness value
12124
 */
12125
static void quirk_invert_brightness(struct drm_device *dev)
2330 Serge 12126
{
12127
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 12128
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
12129
	DRM_INFO("applying inverted panel brightness quirk\n");
12130
}
2327 Serge 12131
 
5060 serge 12132
/* Some VBT's incorrectly indicate no backlight is present */
12133
static void quirk_backlight_present(struct drm_device *dev)
12134
{
12135
	struct drm_i915_private *dev_priv = dev->dev_private;
12136
	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
12137
	DRM_INFO("applying backlight present quirk\n");
12138
}
12139
 
3031 serge 12140
struct intel_quirk {
12141
	int device;
12142
	int subsystem_vendor;
12143
	int subsystem_device;
12144
	void (*hook)(struct drm_device *dev);
12145
};
2327 Serge 12146
 
3031 serge 12147
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
12148
struct intel_dmi_quirk {
12149
	void (*hook)(struct drm_device *dev);
12150
	const struct dmi_system_id (*dmi_id_list)[];
12151
};
2327 Serge 12152
 
3031 serge 12153
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
12154
{
12155
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
12156
	return 1;
2330 Serge 12157
}
2327 Serge 12158
 
3031 serge 12159
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
12160
	{
12161
		.dmi_id_list = &(const struct dmi_system_id[]) {
12162
			{
12163
				.callback = intel_dmi_reverse_brightness,
12164
				.ident = "NCR Corporation",
12165
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
12166
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
12167
				},
12168
			},
12169
			{ }  /* terminating entry */
12170
		},
12171
		.hook = quirk_invert_brightness,
12172
	},
12173
};
2327 Serge 12174
 
3031 serge 12175
static struct intel_quirk intel_quirks[] = {
12176
	/* HP Mini needs pipe A force quirk (LP: #322104) */
12177
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
2327 Serge 12178
 
3031 serge 12179
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
12180
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
2327 Serge 12181
 
3031 serge 12182
	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
12183
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
2327 Serge 12184
 
3031 serge 12185
	/* Lenovo U160 cannot use SSC on LVDS */
12186
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
2327 Serge 12187
 
3031 serge 12188
	/* Sony Vaio Y cannot use SSC on LVDS */
12189
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
2327 Serge 12190
 
3031 serge 12191
	/* Acer Aspire 5734Z must invert backlight brightness */
12192
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
3480 Serge 12193
 
12194
	/* Acer/eMachines G725 */
12195
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
12196
 
12197
	/* Acer/eMachines e725 */
12198
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
12199
 
12200
	/* Acer/Packard Bell NCL20 */
12201
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
12202
 
12203
	/* Acer Aspire 4736Z */
12204
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
5060 serge 12205
 
12206
	/* Acer Aspire 5336 */
12207
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
12208
 
12209
	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
12210
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
12211
 
5097 serge 12212
	/* Acer C720 Chromebook (Core i3 4005U) */
12213
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
12214
 
5060 serge 12215
	/* Toshiba CB35 Chromebook (Celeron 2955U) */
12216
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
12217
 
12218
	/* HP Chromebook 14 (Celeron 2955U) */
12219
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
3031 serge 12220
};
2327 Serge 12221
 
3031 serge 12222
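/*
 * Apply platform quirks: each intel_quirk entry matches on the PCI device id
 * plus subsystem vendor/device (PCI_ANY_ID acts as a wildcard), while the
 * DMI table catches machines that cannot be identified by PCI ids alone.
 */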
static void intel_init_quirks(struct drm_device *dev)
2330 Serge 12223
{
3031 serge 12224
	struct pci_dev *d = dev->pdev;
12225
	int i;
2327 Serge 12226
 
3031 serge 12227
	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
12228
		struct intel_quirk *q = &intel_quirks[i];
2327 Serge 12229
 
3031 serge 12230
		if (d->device == q->device &&
12231
		    (d->subsystem_vendor == q->subsystem_vendor ||
12232
		     q->subsystem_vendor == PCI_ANY_ID) &&
12233
		    (d->subsystem_device == q->subsystem_device ||
12234
		     q->subsystem_device == PCI_ANY_ID))
12235
			q->hook(dev);
12236
	}
5097 serge 12237
	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
12238
		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
12239
			intel_dmi_quirks[i].hook(dev);
12240
	}
2330 Serge 12241
}
2327 Serge 12242
 
3031 serge 12243
/* Disable the VGA plane that we never use */
12244
static void i915_disable_vga(struct drm_device *dev)
2330 Serge 12245
{
12246
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 12247
	u8 sr1;
3480 Serge 12248
	u32 vga_reg = i915_vgacntrl_reg(dev);
2327 Serge 12249
 
4560 Serge 12250
//	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
12251
	outb(SR01, VGA_SR_INDEX);
12252
	sr1 = inb(VGA_SR_DATA);
12253
	outb(sr1 | 1<<5, VGA_SR_DATA);
12254
//	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
3031 serge 12255
	udelay(300);
2327 Serge 12256
 
3031 serge 12257
	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
12258
	POSTING_READ(vga_reg);
2330 Serge 12259
}
12260
 
3031 serge 12261
void intel_modeset_init_hw(struct drm_device *dev)
2342 Serge 12262
{
3031 serge 12263
	intel_prepare_ddi(dev);
2342 Serge 12264
 
5060 serge 12265
	if (IS_VALLEYVIEW(dev))
12266
		vlv_update_cdclk(dev);
12267
 
3031 serge 12268
	intel_init_clock_gating(dev);
12269
 
4560 Serge 12270
	intel_reset_dpio(dev);
4398 Serge 12271
 
3482 Serge 12272
    intel_enable_gt_powersave(dev);
2342 Serge 12273
}
12274
 
4398 Serge 12275
void intel_modeset_suspend_hw(struct drm_device *dev)
12276
{
12277
	intel_suspend_hw(dev);
12278
}
12279
 
3031 serge 12280
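/*
 * One-time modeset initialisation: set up the mode_config limits and quirks,
 * pick the per-platform display functions, create a CRTC and its sprite
 * planes for every pipe, initialise the shared DPLLs, disable the legacy VGA
 * plane, probe the outputs and finally read back the hardware state
 * (including any BIOS framebuffer) so the boot configuration can be reused.
 */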
void intel_modeset_init(struct drm_device *dev)
2330 Serge 12281
{
3031 serge 12282
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 12283
	int sprite, ret;
12284
	enum pipe pipe;
12285
	struct intel_crtc *crtc;
2330 Serge 12286
 
3031 serge 12287
	drm_mode_config_init(dev);
2330 Serge 12288
 
3031 serge 12289
	dev->mode_config.min_width = 0;
12290
	dev->mode_config.min_height = 0;
2330 Serge 12291
 
3031 serge 12292
	dev->mode_config.preferred_depth = 24;
12293
	dev->mode_config.prefer_shadow = 1;
2330 Serge 12294
 
3031 serge 12295
	dev->mode_config.funcs = &intel_mode_funcs;
2330 Serge 12296
 
3031 serge 12297
	intel_init_quirks(dev);
2330 Serge 12298
 
3031 serge 12299
	intel_init_pm(dev);
2330 Serge 12300
 
3746 Serge 12301
	if (INTEL_INFO(dev)->num_pipes == 0)
12302
		return;
12303
 
3031 serge 12304
	intel_init_display(dev);
2330 Serge 12305
 
3031 serge 12306
	if (IS_GEN2(dev)) {
12307
		dev->mode_config.max_width = 2048;
12308
		dev->mode_config.max_height = 2048;
12309
	} else if (IS_GEN3(dev)) {
12310
		dev->mode_config.max_width = 4096;
12311
		dev->mode_config.max_height = 4096;
12312
	} else {
12313
		dev->mode_config.max_width = 8192;
12314
		dev->mode_config.max_height = 8192;
12315
	}
5060 serge 12316
 
12317
	if (IS_GEN2(dev)) {
12318
		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
12319
		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
12320
	} else {
12321
		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
12322
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
12323
	}
12324
 
3480 Serge 12325
	dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
2330 Serge 12326
 
3031 serge 12327
	DRM_DEBUG_KMS("%d display pipe%s available.\n",
3746 Serge 12328
		      INTEL_INFO(dev)->num_pipes,
12329
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
2330 Serge 12330
 
5060 serge 12331
	for_each_pipe(pipe) {
12332
		intel_crtc_init(dev, pipe);
12333
		for_each_sprite(pipe, sprite) {
12334
			ret = intel_plane_init(dev, pipe, sprite);
3031 serge 12335
			if (ret)
4104 Serge 12336
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
5060 serge 12337
					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
3746 Serge 12338
		}
2330 Serge 12339
	}
12340
 
4560 Serge 12341
	intel_init_dpio(dev);
12342
	intel_reset_dpio(dev);
12343
 
4104 Serge 12344
	intel_shared_dpll_init(dev);
2330 Serge 12345
 
3031 serge 12346
	/* Just disable it once at startup */
12347
	i915_disable_vga(dev);
12348
	intel_setup_outputs(dev);
3480 Serge 12349
 
12350
	/* Just in case the BIOS is doing something questionable. */
12351
	intel_disable_fbc(dev);
2330 Serge 12352
 
5060 serge 12353
	drm_modeset_lock_all(dev);
12354
	intel_modeset_setup_hw_state(dev, false);
12355
	drm_modeset_unlock_all(dev);
12356
 
12357
	for_each_intel_crtc(dev, crtc) {
12358
		if (!crtc->active)
12359
			continue;
12360
 
12361
		/*
12362
		 * Note that reserving the BIOS fb up front prevents us
12363
		 * from stuffing other stolen allocations like the ring
12364
		 * on top.  This prevents some ugliness at boot time, and
12365
		 * can even allow for smooth boot transitions if the BIOS
12366
		 * fb is large enough for the active pipe configuration.
12367
		 */
12368
		if (dev_priv->display.get_plane_config) {
12369
			dev_priv->display.get_plane_config(crtc,
12370
							   &crtc->plane_config);
12371
			/*
12372
			 * If the fb is shared between multiple heads, we'll
12373
			 * just get the first one.
12374
			 */
12375
			intel_find_plane_obj(crtc, &crtc->plane_config);
12376
		}
12377
	}
2330 Serge 12378
}
12379
 
3031 serge 12380
static void intel_enable_pipe_a(struct drm_device *dev)
2330 Serge 12381
{
3031 serge 12382
	struct intel_connector *connector;
12383
	struct drm_connector *crt = NULL;
12384
	struct intel_load_detect_pipe load_detect_temp;
5060 serge 12385
	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
2330 Serge 12386
 
3031 serge 12387
	/* We can't just switch on the pipe A, we need to set things up with a
12388
	 * proper mode and output configuration. As a gross hack, enable pipe A
12389
	 * by enabling the load detect pipe once. */
12390
	list_for_each_entry(connector,
12391
			    &dev->mode_config.connector_list,
12392
			    base.head) {
12393
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
12394
			crt = &connector->base;
12395
			break;
2330 Serge 12396
		}
12397
	}
12398
 
3031 serge 12399
	if (!crt)
12400
		return;
2330 Serge 12401
 
5060 serge 12402
	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
3031 serge 12403
		intel_release_load_detect_pipe(crt, &load_detect_temp);
2327 Serge 12404
}
12405
 
3031 serge 12406
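/*
 * On gen2/3 a display plane can be routed to either pipe, so check whether
 * the other plane is enabled and currently selected onto our pipe; callers
 * only need this on gen3 and earlier since gen4+ has a fixed plane->pipe
 * mapping.
 */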
static bool
12407
intel_check_plane_mapping(struct intel_crtc *crtc)
2327 Serge 12408
{
3746 Serge 12409
	struct drm_device *dev = crtc->base.dev;
12410
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 12411
	u32 reg, val;
2327 Serge 12412
 
3746 Serge 12413
	if (INTEL_INFO(dev)->num_pipes == 1)
3031 serge 12414
		return true;
2327 Serge 12415
 
3031 serge 12416
	reg = DSPCNTR(!crtc->plane);
12417
	val = I915_READ(reg);
2327 Serge 12418
 
3031 serge 12419
	if ((val & DISPLAY_PLANE_ENABLE) &&
12420
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
12421
		return false;
2327 Serge 12422
 
3031 serge 12423
	return true;
2327 Serge 12424
}
12425
 
3031 serge 12426
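/*
 * Bring one CRTC into a state the driver can reason about after taking over
 * from the BIOS: clear frame start delays, fix up a wrong plane->pipe
 * mapping on gen3 and earlier, apply the pipe A force quirk, and break any
 * stale connector/encoder links when the pipe turns out to be disabled.
 */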
static void intel_sanitize_crtc(struct intel_crtc *crtc)
2327 Serge 12427
{
3031 serge 12428
	struct drm_device *dev = crtc->base.dev;
2327 Serge 12429
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 12430
	u32 reg;
2327 Serge 12431
 
3031 serge 12432
	/* Clear any frame start delays used for debugging left by the BIOS */
3746 Serge 12433
	reg = PIPECONF(crtc->config.cpu_transcoder);
3031 serge 12434
	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
2327 Serge 12435
 
5060 serge 12436
	/* restore vblank interrupts to correct state */
12437
	if (crtc->active)
12438
		drm_vblank_on(dev, crtc->pipe);
12439
	else
12440
		drm_vblank_off(dev, crtc->pipe);
12441
 
3031 serge 12442
	/* We need to sanitize the plane -> pipe mapping first because this will
12443
	 * disable the crtc (and hence change the state) if it is wrong. Note
12444
	 * that gen4+ has a fixed plane -> pipe mapping.  */
12445
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
12446
		struct intel_connector *connector;
12447
		bool plane;
2327 Serge 12448
 
3031 serge 12449
		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
12450
			      crtc->base.base.id);
2327 Serge 12451
 
3031 serge 12452
		/* Pipe has the wrong plane attached and the plane is active.
12453
		 * Temporarily change the plane mapping and disable everything
12454
		 * ...  */
12455
		plane = crtc->plane;
12456
		crtc->plane = !plane;
5060 serge 12457
		crtc->primary_enabled = true;
3031 serge 12458
		dev_priv->display.crtc_disable(&crtc->base);
12459
		crtc->plane = plane;
2342 Serge 12460
 
3031 serge 12461
		/* ... and break all links. */
12462
		list_for_each_entry(connector, &dev->mode_config.connector_list,
12463
				    base.head) {
12464
			if (connector->encoder->base.crtc != &crtc->base)
12465
				continue;
2327 Serge 12466
 
5060 serge 12467
			connector->base.dpms = DRM_MODE_DPMS_OFF;
12468
			connector->base.encoder = NULL;
3031 serge 12469
		}
5060 serge 12470
		/* multiple connectors may have the same encoder:
12471
		 *  handle them and break crtc link separately */
12472
		list_for_each_entry(connector, &dev->mode_config.connector_list,
12473
				    base.head)
12474
			if (connector->encoder->base.crtc == &crtc->base) {
12475
				connector->encoder->base.crtc = NULL;
12476
				connector->encoder->connectors_active = false;
12477
			}
2327 Serge 12478
 
3031 serge 12479
		WARN_ON(crtc->active);
12480
		crtc->base.enabled = false;
12481
	}
2327 Serge 12482
 
3031 serge 12483
	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
12484
	    crtc->pipe == PIPE_A && !crtc->active) {
12485
		/* BIOS forgot to enable pipe A; this mostly happens after
12486
		 * resume. Force-enable the pipe to fix this; the update_dpms
12487
		 * call below will restore the pipe to the right state, but leave
12488
		 * the required bits on. */
12489
		intel_enable_pipe_a(dev);
12490
	}
2327 Serge 12491
 
3031 serge 12492
	/* Adjust the state of the output pipe according to whether we
12493
	 * have active connectors/encoders. */
12494
	intel_crtc_update_dpms(&crtc->base);
2327 Serge 12495
 
3031 serge 12496
	if (crtc->active != crtc->base.enabled) {
12497
		struct intel_encoder *encoder;
2327 Serge 12498
 
3031 serge 12499
		/* This can happen either due to bugs in the get_hw_state
12500
		 * functions or because the pipe is force-enabled due to the
12501
		 * pipe A quirk. */
12502
		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
12503
			      crtc->base.base.id,
12504
			      crtc->base.enabled ? "enabled" : "disabled",
12505
			      crtc->active ? "enabled" : "disabled");
2327 Serge 12506
 
3031 serge 12507
		crtc->base.enabled = crtc->active;
2327 Serge 12508
 
3031 serge 12509
		/* Because we only establish the connector -> encoder ->
12510
		 * crtc links if something is active, this means the
12511
		 * crtc is now deactivated. Break the links. connector
12512
		 * -> encoder links are only established when things are
12513
		 *  actually up, hence no need to break them. */
12514
		WARN_ON(crtc->active);
2327 Serge 12515
 
3031 serge 12516
		for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
12517
			WARN_ON(encoder->connectors_active);
12518
			encoder->base.crtc = NULL;
12519
		}
12520
	}
5060 serge 12521
 
12522
	if (crtc->active || IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen < 5) {
12523
		/*
12524
		 * We start out with underrun reporting disabled to avoid races.
12525
		 * For correct bookkeeping mark this on active crtcs.
12526
		 *
12527
		 * Also on gmch platforms we don't have any hardware bits to
12528
		 * disable the underrun reporting. Which means we need to start
12529
		 * out with underrun reporting disabled also on inactive pipes,
12530
		 * since otherwise we'll complain about the garbage we read when
12531
		 * e.g. coming up after runtime pm.
12532
		 *
12533
		 * No protection against concurrent access is required - at
12534
		 * worst a fifo underrun happens which also sets this to false.
12535
		 */
12536
		crtc->cpu_fifo_underrun_disabled = true;
12537
		crtc->pch_fifo_underrun_disabled = true;
12538
 
12539
		update_scanline_offset(crtc);
12540
	}
2327 Serge 12541
}
12542
 
3031 serge 12543
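/* Disable encoders that claim active connectors but drive no active pipe
 * (typically fallout from restoring registers on resume) and clamp the
 * affected connectors to DPMS off. */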
static void intel_sanitize_encoder(struct intel_encoder *encoder)
2327 Serge 12544
{
3031 serge 12545
	struct intel_connector *connector;
12546
	struct drm_device *dev = encoder->base.dev;
2327 Serge 12547
 
3031 serge 12548
	/* We need to check both for a crtc link (meaning that the
12549
	 * encoder is active and trying to read from a pipe) and the
12550
	 * pipe itself being active. */
12551
	bool has_active_crtc = encoder->base.crtc &&
12552
		to_intel_crtc(encoder->base.crtc)->active;
2327 Serge 12553
 
3031 serge 12554
	if (encoder->connectors_active && !has_active_crtc) {
12555
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
12556
			      encoder->base.base.id,
5060 serge 12557
			      encoder->base.name);
2327 Serge 12558
 
3031 serge 12559
		/* Connector is active, but has no active pipe. This is
12560
		 * fallout from our resume register restoring. Disable
12561
		 * the encoder manually again. */
12562
		if (encoder->base.crtc) {
12563
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
12564
				      encoder->base.base.id,
5060 serge 12565
				      encoder->base.name);
3031 serge 12566
			encoder->disable(encoder);
5060 serge 12567
			if (encoder->post_disable)
12568
				encoder->post_disable(encoder);
3031 serge 12569
		}
5060 serge 12570
		encoder->base.crtc = NULL;
12571
		encoder->connectors_active = false;
2327 Serge 12572
 
3031 serge 12573
		/* Inconsistent output/port/pipe state happens presumably due to
12574
		 * a bug in one of the get_hw_state functions. Or someplace else
12575
		 * in our code, like the register restore mess on resume. Clamp
12576
		 * things to off as a safer default. */
12577
		list_for_each_entry(connector,
12578
				    &dev->mode_config.connector_list,
12579
				    base.head) {
12580
			if (connector->encoder != encoder)
12581
				continue;
5060 serge 12582
			connector->base.dpms = DRM_MODE_DPMS_OFF;
12583
			connector->base.encoder = NULL;
3031 serge 12584
		}
12585
	}
12586
	/* Enabled encoders without active connectors will be fixed in
12587
	 * the crtc fixup. */
2327 Serge 12588
}
12589
 
5060 serge 12590
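/* Turn the legacy VGA plane back off if something (usually the BIOS) has
 * re-enabled it. The caller must ensure the VGA power domain is powered. */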
void i915_redisable_vga_power_on(struct drm_device *dev)
3746 Serge 12591
{
12592
	struct drm_i915_private *dev_priv = dev->dev_private;
12593
	u32 vga_reg = i915_vgacntrl_reg(dev);
12594
 
5060 serge 12595
	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
12596
		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
12597
		i915_disable_vga(dev);
12598
	}
12599
}
12600
 
12601
void i915_redisable_vga(struct drm_device *dev)
12602
{
12603
	struct drm_i915_private *dev_priv = dev->dev_private;
12604
 
4104 Serge 12605
	/* This function can be called either from intel_modeset_setup_hw_state or
12606
	 * at a very early point in our resume sequence, where the power well
12607
	 * structures are not yet restored. Since this function is at a very
12608
	 * paranoid "someone might have enabled VGA while we were not looking"
12609
	 * level, just check if the power well is enabled instead of trying to
12610
	 * follow the "don't touch the power well if we don't need it" policy
12611
	 * the rest of the driver uses. */
5060 serge 12612
	if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA))
4104 Serge 12613
		return;
12614
 
5060 serge 12615
	i915_redisable_vga_power_on(dev);
3746 Serge 12616
}
12617
 
5060 serge 12618
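/* Read back from the hardware whether the primary plane of an active crtc is
 * currently enabled. */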
static bool primary_get_hw_state(struct intel_crtc *crtc)
12619
{
12620
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
12621
 
12622
	if (!crtc->active)
12623
		return false;
12624
 
12625
	return I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE;
12626
}
12627
 
4104 Serge 12628
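/* Read the current pipe, shared DPLL, encoder and connector state out of the
 * hardware into the corresponding software tracking structures. */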
static void intel_modeset_readout_hw_state(struct drm_device *dev)
2332 Serge 12629
{
12630
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 12631
	enum pipe pipe;
12632
	struct intel_crtc *crtc;
12633
	struct intel_encoder *encoder;
12634
	struct intel_connector *connector;
4104 Serge 12635
	int i;
2327 Serge 12636
 
5060 serge 12637
	for_each_intel_crtc(dev, crtc) {
3746 Serge 12638
		memset(&crtc->config, 0, sizeof(crtc->config));
2327 Serge 12639
 
5060 serge 12640
		crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
12641
 
3746 Serge 12642
		crtc->active = dev_priv->display.get_pipe_config(crtc,
12643
								 &crtc->config);
2327 Serge 12644
 
3031 serge 12645
		crtc->base.enabled = crtc->active;
5060 serge 12646
		crtc->primary_enabled = primary_get_hw_state(crtc);
2330 Serge 12647
 
3031 serge 12648
		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
12649
			      crtc->base.base.id,
12650
			      crtc->active ? "enabled" : "disabled");
2339 Serge 12651
	}
2332 Serge 12652
 
4104 Serge 12653
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
12654
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
12655
 
12656
		pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state);
12657
		pll->active = 0;
5060 serge 12658
		for_each_intel_crtc(dev, crtc) {
4104 Serge 12659
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
12660
				pll->active++;
12661
		}
12662
		pll->refcount = pll->active;
12663
 
12664
		DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
12665
			      pll->name, pll->refcount, pll->on);
5060 serge 12666
 
12667
		if (pll->refcount)
12668
			intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
4104 Serge 12669
	}
12670
 
3031 serge 12671
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
12672
			    base.head) {
12673
		pipe = 0;
2332 Serge 12674
 
3031 serge 12675
		if (encoder->get_hw_state(encoder, &pipe)) {
4104 Serge 12676
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
12677
			encoder->base.crtc = &crtc->base;
12678
			encoder->get_config(encoder, &crtc->config);
3031 serge 12679
		} else {
12680
			encoder->base.crtc = NULL;
12681
		}
2332 Serge 12682
 
3031 serge 12683
		encoder->connectors_active = false;
4560 Serge 12684
		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
3031 serge 12685
			      encoder->base.base.id,
5060 serge 12686
			      encoder->base.name,
3031 serge 12687
			      encoder->base.crtc ? "enabled" : "disabled",
4560 Serge 12688
			      pipe_name(pipe));
3031 serge 12689
	}
2332 Serge 12690
 
3031 serge 12691
	list_for_each_entry(connector, &dev->mode_config.connector_list,
12692
			    base.head) {
12693
		if (connector->get_hw_state(connector)) {
12694
			connector->base.dpms = DRM_MODE_DPMS_ON;
12695
			connector->encoder->connectors_active = true;
12696
			connector->base.encoder = &connector->encoder->base;
12697
		} else {
12698
			connector->base.dpms = DRM_MODE_DPMS_OFF;
12699
			connector->base.encoder = NULL;
12700
		}
12701
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
12702
			      connector->base.base.id,
5060 serge 12703
			      connector->base.name,
3031 serge 12704
			      connector->base.encoder ? "enabled" : "disabled");
2332 Serge 12705
	}
4104 Serge 12706
}
2332 Serge 12707
 
4104 Serge 12708
/* Scan out the current hw modeset state, sanitize it, and map it into the drm
12709
 * and i915 state tracking structures. */
12710
void intel_modeset_setup_hw_state(struct drm_device *dev,
12711
				  bool force_restore)
12712
{
12713
	struct drm_i915_private *dev_priv = dev->dev_private;
12714
	enum pipe pipe;
12715
	struct intel_crtc *crtc;
12716
	struct intel_encoder *encoder;
12717
	int i;
12718
 
12719
	intel_modeset_readout_hw_state(dev);
12720
 
12721
	/*
12722
	 * Now that we have the config, copy it to each CRTC struct
12723
	 * Note that this could go away if we move to using crtc_config
12724
	 * checking everywhere.
12725
	 */
5060 serge 12726
	for_each_intel_crtc(dev, crtc) {
12727
		if (crtc->active && i915.fastboot) {
12728
			intel_mode_from_pipe_config(&crtc->base.mode, &crtc->config);
4104 Serge 12729
			DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
12730
				      crtc->base.base.id);
12731
			drm_mode_debug_printmodeline(&crtc->base.mode);
12732
		}
12733
	}
12734
 
3031 serge 12735
	/* HW state is read out, now we need to sanitize this mess. */
12736
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
12737
			    base.head) {
12738
		intel_sanitize_encoder(encoder);
2332 Serge 12739
	}
12740
 
3031 serge 12741
	for_each_pipe(pipe) {
12742
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
12743
		intel_sanitize_crtc(crtc);
4104 Serge 12744
		intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
2332 Serge 12745
	}
12746
 
4104 Serge 12747
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
12748
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
12749
 
12750
		if (!pll->on || pll->active)
12751
			continue;
12752
 
12753
		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
12754
 
12755
		pll->disable(dev_priv, pll);
12756
		pll->on = false;
12757
	}
12758
 
4560 Serge 12759
	if (HAS_PCH_SPLIT(dev))
12760
		ilk_wm_get_hw_state(dev);
12761
 
3243 Serge 12762
	if (force_restore) {
4560 Serge 12763
		i915_redisable_vga(dev);
12764
 
3746 Serge 12765
		/*
12766
		 * We need to use raw interfaces for restoring state to avoid
12767
		 * checking (bogus) intermediate states.
12768
		 */
3243 Serge 12769
		for_each_pipe(pipe) {
3746 Serge 12770
			struct drm_crtc *crtc =
12771
				dev_priv->pipe_to_crtc_mapping[pipe];
12772
 
12773
			__intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
5060 serge 12774
					 crtc->primary->fb);
3243 Serge 12775
		}
12776
	} else {
3031 serge 12777
		intel_modeset_update_staged_output_state(dev);
3243 Serge 12778
	}
2332 Serge 12779
 
3031 serge 12780
	intel_modeset_check_state(dev);
2332 Serge 12781
}
12782
 
3031 serge 12783
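/* GEM-side modeset init: set up GT power saving, run the hardware init and
 * pin any framebuffers taken over from the BIOS so the planes keep scanning
 * out of valid memory. */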
void intel_modeset_gem_init(struct drm_device *dev)
2330 Serge 12784
{
5060 serge 12785
	struct drm_crtc *c;
12786
	struct drm_i915_gem_object *obj;
12787
 
12788
	mutex_lock(&dev->struct_mutex);
12789
	intel_init_gt_powersave(dev);
12790
	mutex_unlock(&dev->struct_mutex);
12791
 
3031 serge 12792
	intel_modeset_init_hw(dev);
2330 Serge 12793
 
3031 serge 12794
//   intel_setup_overlay(dev);
2330 Serge 12795
 
5060 serge 12796
	/*
12797
	 * Make sure any fbs we allocated at startup are properly
12798
	 * pinned & fenced.  When we do the allocation it's too early
12799
	 * for this.
12800
	 */
12801
	mutex_lock(&dev->struct_mutex);
12802
	for_each_crtc(dev, c) {
12803
		obj = intel_fb_obj(c->primary->fb);
12804
		if (obj == NULL)
12805
			continue;
12806
 
12807
		if (intel_pin_and_fence_fb_obj(dev, obj, NULL)) {
12808
			DRM_ERROR("failed to pin boot fb on pipe %d\n",
12809
				  to_intel_crtc(c)->pipe);
12810
			drm_framebuffer_unreference(c->primary->fb);
12811
			c->primary->fb = NULL;
12812
		}
12813
	}
12814
	mutex_unlock(&dev->struct_mutex);
2330 Serge 12815
}
12816
 
5060 serge 12817
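/* Destroy the connector's backlight and unregister it from userspace. */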
void intel_connector_unregister(struct intel_connector *intel_connector)
12818
{
12819
	struct drm_connector *connector = &intel_connector->base;
12820
 
12821
	intel_panel_destroy_backlight(connector);
12822
	drm_connector_unregister(connector);
12823
}
12824
 
3031 serge 12825
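/* Full modeset teardown. Stubbed out in this port; the upstream body is kept
 * under #if 0 for reference. */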
void intel_modeset_cleanup(struct drm_device *dev)
2327 Serge 12826
{
3031 serge 12827
#if 0
12828
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 12829
	struct drm_connector *connector;
2327 Serge 12830
 
4104 Serge 12831
	/*
12832
	 * Shut down interrupts and polling first to avoid creating havoc.
12833
	 * Too much stuff here (turning off rps, connectors, ...) would
12834
	 * experience fancy races otherwise.
12835
	 */
12836
	drm_irq_uninstall(dev);
5060 serge 12837
	intel_hpd_cancel_work(dev_priv);
12838
	dev_priv->pm._irqs_disabled = true;
12839
 
4104 Serge 12840
	/*
12841
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
12842
	 * poll handlers. Hence disable polling after hpd handling is shut down.
12843
	 */
4560 Serge 12844
	drm_kms_helper_poll_fini(dev);
4104 Serge 12845
 
3031 serge 12846
	mutex_lock(&dev->struct_mutex);
2327 Serge 12847
 
4560 Serge 12848
	intel_unregister_dsm_handler();
2327 Serge 12849
 
3031 serge 12850
	intel_disable_fbc(dev);
2342 Serge 12851
 
3031 serge 12852
	intel_disable_gt_powersave(dev);
2342 Serge 12853
 
3031 serge 12854
	ironlake_teardown_rc6(dev);
2327 Serge 12855
 
3031 serge 12856
	mutex_unlock(&dev->struct_mutex);
2327 Serge 12857
 
4104 Serge 12858
	/* flush any delayed tasks or pending work */
12859
	flush_scheduled_work();
2327 Serge 12860
 
4560 Serge 12861
	/* destroy the backlight and sysfs files before encoders/connectors */
12862
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
5060 serge 12863
		struct intel_connector *intel_connector;
12864
 
12865
		intel_connector = to_intel_connector(connector);
12866
		intel_connector->unregister(intel_connector);
4560 Serge 12867
	}
2327 Serge 12868
 
3031 serge 12869
	drm_mode_config_cleanup(dev);
5060 serge 12870
 
12871
	intel_cleanup_overlay(dev);
12872
 
12873
	mutex_lock(&dev->struct_mutex);
12874
	intel_cleanup_gt_powersave(dev);
12875
	mutex_unlock(&dev->struct_mutex);
2327 Serge 12876
#endif
12877
}
12878
 
12879
/*
3031 serge 12880
 * Return which encoder is currently attached to the connector.
2327 Serge 12881
 */
3031 serge 12882
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
2327 Serge 12883
{
3031 serge 12884
	return &intel_attached_encoder(connector)->base;
12885
}
2327 Serge 12886
 
3031 serge 12887
void intel_connector_attach_encoder(struct intel_connector *connector,
12888
				    struct intel_encoder *encoder)
12889
{
12890
	connector->encoder = encoder;
12891
	drm_mode_connector_attach_encoder(&connector->base,
12892
					  &encoder->base);
2327 Serge 12893
}
12894
 
12895
/*
3031 serge 12896
 * Set the VGA decode state - true == enable VGA decode
2327 Serge 12897
 */
3031 serge 12898
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
2327 Serge 12899
{
2330 Serge 12900
	struct drm_i915_private *dev_priv = dev->dev_private;
4539 Serge 12901
	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
3031 serge 12902
	u16 gmch_ctrl;
2327 Serge 12903
 
5060 serge 12904
	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
12905
		DRM_ERROR("failed to read control word\n");
12906
		return -EIO;
12907
	}
12908
 
12909
	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
12910
		return 0;
12911
 
3031 serge 12912
	if (state)
12913
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
2330 Serge 12914
	else
3031 serge 12915
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
5060 serge 12916
 
12917
	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
12918
		DRM_ERROR("failed to write control word\n");
12919
		return -EIO;
12920
	}
12921
 
3031 serge 12922
	return 0;
2330 Serge 12923
}
12924
 
3031 serge 12925
#ifdef CONFIG_DEBUG_FS
2327 Serge 12926
 
3031 serge 12927
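/* Raw snapshot of the display registers (pipes, planes, cursors and
 * transcoders) taken when a GPU error state is captured. */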
struct intel_display_error_state {
4104 Serge 12928
 
12929
	u32 power_well_driver;
12930
 
12931
	int num_transcoders;
12932
 
3031 serge 12933
	struct intel_cursor_error_state {
12934
		u32 control;
12935
		u32 position;
12936
		u32 base;
12937
		u32 size;
12938
	} cursor[I915_MAX_PIPES];
2327 Serge 12939
 
3031 serge 12940
	struct intel_pipe_error_state {
4560 Serge 12941
		bool power_domain_on;
3031 serge 12942
		u32 source;
5060 serge 12943
		u32 stat;
3031 serge 12944
	} pipe[I915_MAX_PIPES];
2327 Serge 12945
 
3031 serge 12946
	struct intel_plane_error_state {
12947
		u32 control;
12948
		u32 stride;
12949
		u32 size;
12950
		u32 pos;
12951
		u32 addr;
12952
		u32 surface;
12953
		u32 tile_offset;
12954
	} plane[I915_MAX_PIPES];
4104 Serge 12955
 
12956
	struct intel_transcoder_error_state {
4560 Serge 12957
		bool power_domain_on;
4104 Serge 12958
		enum transcoder cpu_transcoder;
12959
 
12960
		u32 conf;
12961
 
12962
		u32 htotal;
12963
		u32 hblank;
12964
		u32 hsync;
12965
		u32 vtotal;
12966
		u32 vblank;
12967
		u32 vsync;
12968
	} transcoder[4];
3031 serge 12969
};
2327 Serge 12970
 
3031 serge 12971
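/* Capture the display error state, skipping pipes and transcoders whose power
 * domains are off so we never read registers of a powered-down well. */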
struct intel_display_error_state *
12972
intel_display_capture_error_state(struct drm_device *dev)
12973
{
5060 serge 12974
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 12975
	struct intel_display_error_state *error;
4104 Serge 12976
	int transcoders[] = {
12977
		TRANSCODER_A,
12978
		TRANSCODER_B,
12979
		TRANSCODER_C,
12980
		TRANSCODER_EDP,
12981
	};
3031 serge 12982
	int i;
2327 Serge 12983
 
4104 Serge 12984
	if (INTEL_INFO(dev)->num_pipes == 0)
12985
		return NULL;
12986
 
4560 Serge 12987
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
3031 serge 12988
	if (error == NULL)
12989
		return NULL;
2327 Serge 12990
 
4560 Serge 12991
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4104 Serge 12992
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
12993
 
3031 serge 12994
	for_each_pipe(i) {
4560 Serge 12995
		error->pipe[i].power_domain_on =
5060 serge 12996
			intel_display_power_enabled_unlocked(dev_priv,
12997
						       POWER_DOMAIN_PIPE(i));
4560 Serge 12998
		if (!error->pipe[i].power_domain_on)
12999
			continue;
13000
 
3031 serge 13001
		error->cursor[i].control = I915_READ(CURCNTR(i));
13002
		error->cursor[i].position = I915_READ(CURPOS(i));
13003
		error->cursor[i].base = I915_READ(CURBASE(i));
2327 Serge 13004
 
3031 serge 13005
		error->plane[i].control = I915_READ(DSPCNTR(i));
13006
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
3746 Serge 13007
		if (INTEL_INFO(dev)->gen <= 3) {
3031 serge 13008
			error->plane[i].size = I915_READ(DSPSIZE(i));
13009
			error->plane[i].pos = I915_READ(DSPPOS(i));
3746 Serge 13010
		}
13011
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
3031 serge 13012
			error->plane[i].addr = I915_READ(DSPADDR(i));
13013
		if (INTEL_INFO(dev)->gen >= 4) {
13014
			error->plane[i].surface = I915_READ(DSPSURF(i));
13015
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
13016
		}
2327 Serge 13017
 
3031 serge 13018
		error->pipe[i].source = I915_READ(PIPESRC(i));
5060 serge 13019
 
13020
		if (HAS_GMCH_DISPLAY(dev))
13021
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
3031 serge 13022
	}
2327 Serge 13023
 
4104 Serge 13024
	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
13025
	if (HAS_DDI(dev_priv->dev))
13026
		error->num_transcoders++; /* Account for eDP. */
13027
 
13028
	for (i = 0; i < error->num_transcoders; i++) {
13029
		enum transcoder cpu_transcoder = transcoders[i];
13030
 
4560 Serge 13031
		error->transcoder[i].power_domain_on =
5060 serge 13032
			intel_display_power_enabled_unlocked(dev_priv,
4560 Serge 13033
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
13034
		if (!error->transcoder[i].power_domain_on)
13035
			continue;
13036
 
4104 Serge 13037
		error->transcoder[i].cpu_transcoder = cpu_transcoder;
13038
 
13039
		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
13040
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
13041
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
13042
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
13043
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
13044
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
13045
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
13046
	}
13047
 
3031 serge 13048
	return error;
2330 Serge 13049
}
2327 Serge 13050
 
4104 Serge 13051
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
13052
 
3031 serge 13053
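/* Dump a previously captured display error state into the error state buffer
 * via err_printf(). */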
void
4104 Serge 13054
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
3031 serge 13055
				struct drm_device *dev,
13056
				struct intel_display_error_state *error)
2332 Serge 13057
{
3031 serge 13058
	int i;
2330 Serge 13059
 
4104 Serge 13060
	if (!error)
13061
		return;
13062
 
13063
	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
4560 Serge 13064
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4104 Serge 13065
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
13066
			   error->power_well_driver);
3031 serge 13067
	for_each_pipe(i) {
4104 Serge 13068
		err_printf(m, "Pipe [%d]:\n", i);
4560 Serge 13069
		err_printf(m, "  Power: %s\n",
13070
			   error->pipe[i].power_domain_on ? "on" : "off");
4104 Serge 13071
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
5060 serge 13072
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
2332 Serge 13073
 
4104 Serge 13074
		err_printf(m, "Plane [%d]:\n", i);
13075
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
13076
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
3746 Serge 13077
		if (INTEL_INFO(dev)->gen <= 3) {
4104 Serge 13078
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
13079
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
3746 Serge 13080
		}
13081
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
4104 Serge 13082
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
3031 serge 13083
		if (INTEL_INFO(dev)->gen >= 4) {
4104 Serge 13084
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
13085
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
3031 serge 13086
		}
2332 Serge 13087
 
4104 Serge 13088
		err_printf(m, "Cursor [%d]:\n", i);
13089
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
13090
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
13091
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
3031 serge 13092
	}
4104 Serge 13093
 
13094
	for (i = 0; i < error->num_transcoders; i++) {
4560 Serge 13095
		err_printf(m, "CPU transcoder: %c\n",
4104 Serge 13096
			   transcoder_name(error->transcoder[i].cpu_transcoder));
4560 Serge 13097
		err_printf(m, "  Power: %s\n",
13098
			   error->transcoder[i].power_domain_on ? "on" : "off");
4104 Serge 13099
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
13100
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
13101
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
13102
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
13103
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
13104
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
13105
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
13106
	}
2327 Serge 13107
}
3031 serge 13108
#endif