Subversion Repositories Kolibri OS

Rev

Rev 4557 | Rev 5060 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2327 Serge 1
/*
2
 * Copyright © 2006-2007 Intel Corporation
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21
 * DEALINGS IN THE SOFTWARE.
22
 *
23
 * Authors:
24
 *  Eric Anholt 
25
 */
26
 
3746 Serge 27
//#include 
2327 Serge 28
#include 
29
//#include 
30
#include 
31
#include 
2330 Serge 32
#include 
3746 Serge 33
#include 
2342 Serge 34
#include 
3031 serge 35
#include 
2327 Serge 36
#include "intel_drv.h"
3031 serge 37
#include 
2327 Serge 38
#include "i915_drv.h"
2351 Serge 39
#include "i915_trace.h"
3031 serge 40
#include 
41
#include 
42
//#include 
2327 Serge 43
 
4104 Serge 44
/* Largest errno value; mirrors the Linux ERR_PTR convention where pointer
 * values in (-MAX_ERRNO, 0) encode negative error codes. */
#define MAX_ERRNO       4095

/* KolibriOS platform hook — presumably returns the physical address of the
 * graphics aperture/bus window; NOTE(review): defined elsewhere, confirm. */
phys_addr_t get_bus_addr(void);
4560 Serge 47
/* Write byte @v to x86 I/O port @port (local replacement for <asm/io.h>,
 * which is unavailable in this port). */
static inline void outb(u8 v, u16 port)
{
    asm volatile("outb %0,%1" : : "a" (v), "dN" (port));
}
51
/* Read and return one byte from x86 I/O port @port. */
static inline u8 inb(u16 port)
{
    u8 v;
    asm volatile("inb %1,%0" : "=a" (v) : "dN" (port));
    return v;
}
57
 
2327 Serge 58
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_config *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				    struct intel_crtc_config *pipe_config);

static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
			  int x, int y, struct drm_framebuffer *old_fb);


/* Inclusive [min, max] range for a single PLL divider field. */
typedef struct {
    int min, max;
} intel_range_t;

/* Post-divider (p2) selection rule: dot clocks below dot_limit use p2_slow,
 * above it p2_fast. */
typedef struct {
    int dot_limit;
    int p2_slow, p2_fast;
} intel_p2_t;

/* Complete set of DPLL divider limits for one platform/output combination;
 * instances below are selected by intel_limit(). */
typedef struct intel_limit intel_limit_t;
struct intel_limit {
    intel_range_t   dot, vco, n, m, m1, m2, p, p1;
    intel_p2_t      p2;
};
84
 
3243 Serge 85
/*
 * Read the raw clock frequency field from the PCH_RAWCLK_FREQ register.
 * Only meaningful on PCH-split platforms (Ironlake and later); the WARN
 * flags misuse on earlier hardware.
 */
int
intel_pch_rawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!HAS_PCH_SPLIT(dev));

	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
}
94
 
2327 Serge 95
static inline u32 /* units of 100MHz */
96
intel_fdi_link_freq(struct drm_device *dev)
97
{
98
	if (IS_GEN5(dev)) {
99
		struct drm_i915_private *dev_priv = dev->dev_private;
100
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
101
	} else
102
		return 27;
103
}
104
 
4104 Serge 105
/* DPLL divider limits for gen2 (i8xx) parts, per output type. */
static const intel_limit_t intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

/* Same as the DAC limits except p2 never drops to 2 for DVO. */
static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

/* gen3/gen4 (i9xx) limits. */
static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
169
 
170
 
171
/* G4x limits; LVDS splits further into single vs dual channel tables. */
static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* dot_limit = 0 means p2 is effectively fixed (slow == fast). */
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};
226
 
227
static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
254
 
255
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
325
 
4560 Serge 326
/* Valleyview — note .m and .p are intentionally unset; intel_PLL_is_valid()
 * skips those checks on VLV. */
static const intel_limit_t intel_limits_vlv = {
	 /*
	  * These are the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};
341
 
4560 Serge 342
/* Derive the composite m/p dividers and the vco/dot frequencies from the
 * raw VLV divider fields.  Bails out (leaving vco/dot untouched) if either
 * divisor would be zero. */
static void vlv_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
3031 serge 351
 
4560 Serge 352
/**
 * Returns whether any output on the specified pipe is of the specified type
 * @crtc: the crtc whose attached encoders are scanned
 * @type: an INTEL_OUTPUT_* encoder type
 */
static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->type == type)
			return true;

	return false;
}
366
 
2327 Serge 367
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
368
						int refclk)
369
{
370
	struct drm_device *dev = crtc->dev;
371
	const intel_limit_t *limit;
372
 
373
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
3480 Serge 374
		if (intel_is_dual_link_lvds(dev)) {
2327 Serge 375
			if (refclk == 100000)
376
				limit = &intel_limits_ironlake_dual_lvds_100m;
377
			else
378
				limit = &intel_limits_ironlake_dual_lvds;
379
		} else {
380
			if (refclk == 100000)
381
				limit = &intel_limits_ironlake_single_lvds_100m;
382
			else
383
				limit = &intel_limits_ironlake_single_lvds;
384
		}
4104 Serge 385
	} else
2327 Serge 386
		limit = &intel_limits_ironlake_dac;
387
 
388
	return limit;
389
}
390
 
391
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
392
{
393
	struct drm_device *dev = crtc->dev;
394
	const intel_limit_t *limit;
395
 
396
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
3480 Serge 397
		if (intel_is_dual_link_lvds(dev))
2327 Serge 398
			limit = &intel_limits_g4x_dual_channel_lvds;
399
		else
400
			limit = &intel_limits_g4x_single_channel_lvds;
401
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
402
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
403
		limit = &intel_limits_g4x_hdmi;
404
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
405
		limit = &intel_limits_g4x_sdvo;
406
	} else /* The option is for other outputs */
407
		limit = &intel_limits_i9xx_sdvo;
408
 
409
	return limit;
410
}
411
 
412
/*
 * Top-level limit-table dispatch: pick the PLL divider limits matching the
 * platform generation and (where the platform cares) the output type driven
 * by @crtc.  @refclk only affects the choice on PCH-split platforms, via
 * intel_ironlake_limit()'s 100MHz LVDS tables.
 */
static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (HAS_PCH_SPLIT(dev))
		limit = intel_ironlake_limit(crtc, refclk);
	else if (IS_G4X(dev)) {
		limit = intel_g4x_limit(crtc);
	} else if (IS_PINEVIEW(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_pineview_lvds;
		else
			limit = &intel_limits_pineview_sdvo;
	} else if (IS_VALLEYVIEW(dev)) {
		limit = &intel_limits_vlv;
	} else if (!IS_GEN2(dev)) {
		/* gen3/gen4 */
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i9xx_lvds;
		else
			limit = &intel_limits_i9xx_sdvo;
	} else {
		/* gen2 */
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i8xx_lvds;
		else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
			limit = &intel_limits_i8xx_dvo;
		else
			limit = &intel_limits_i8xx_dac;
	}
	return limit;
}
443
 
444
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
	/* Single combined m divider (stored in m2), biased by 2. */
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	/* Guard the divisions below; vco/dot stay unset on bogus input. */
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
454
 
4104 Serge 455
/* Effective i9xx feedback divider: m1 is weighted 5x and both register
 * fields carry a +2 bias (see the Ironlake table comment above). */
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}
459
 
460
/* Compute vco/dot for i9xx-style PLLs; n also carries a +2 register bias.
 * Bails out (vco/dot untouched) if a divisor would be zero. */
static void i9xx_clock(int refclk, intel_clock_t *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
469
 
470
/* Fail PLL validation with a (compiled-out) reason string.  NOTE: this
 * macro does a non-local `return false` from the enclosing function. */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/* Pineview has no real m1 and VLV computes m2 freely, so the
	 * m1 > m2 ordering requirement only applies elsewhere. */
	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/* The VLV limit table leaves .p and .m unset, so skip them there. */
	if (!IS_VALLEYVIEW(dev)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
510
 
511
/*
 * Exhaustive search of the i9xx divider space for the combination whose
 * computed dot clock comes closest to @target.  p2 is fixed up-front from
 * the output type; the remaining four dividers are brute-forced.  If
 * @match_clock is non-NULL only candidates with the same total post
 * divider p are accepted.  Returns true iff some valid candidate improved
 * on the initial error (i.e. *best_clock was written).
 */
static bool
i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 must stay strictly greater than m2 (see
			 * intel_PLL_is_valid); larger m2 can't qualify. */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
571
 
572
/*
 * Pineview variant of i9xx_find_best_dpll(): identical brute-force search
 * except that it uses pineview_clock() (single combined m divider) and
 * therefore has no m2 < m1 early-break.  Returns true iff *best_clock was
 * improved over the initial error of @target.
 */
static bool
pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pineview_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
630
 
631
/*
 * G4x divider search.  Unlike the i9xx/pnv variants this tolerates up to
 * ~0.585% error (target>>8 + target>>9), iterates m1/m2/p1 downward to
 * prefer large divider values, and shrinks max_n whenever a better
 * candidate is found so smaller n (better precision) wins ties.  Note
 * @match_clock is accepted for signature parity but not used here.
 */
static bool
g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefere larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
687
 
688
/*
 * Valleyview divider search, working in "fast clock" units (target * 5).
 * m2 is computed directly from the other dividers rather than iterated,
 * and candidates are ranked by ppm error: anything under 100ppm prefers
 * a larger post divider p; otherwise a candidate must beat the best ppm
 * seen so far by more than 10.  @match_clock is unused here.
 */
static bool
vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm, diff;

					/* Solve for m2 rather than sweeping it. */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_clock(refclk, &clock);

					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					diff = abs(clock.dot - target);
					ppm = div_u64(1000000ULL * diff, target);

					if (ppm < 100 && clock.p > best_clock->p) {
						bestppm = 0;
						*best_clock = clock;
						found = true;
					}

					if (bestppm >= 10 && ppm < bestppm - 10) {
						bestppm = ppm;
						*best_clock = clock;
						found = true;
					}
				}
			}
		}
	}

	return found;
}
744
 
4560 Serge 745
/* Whether @crtc is truly scanning out: active, has a framebuffer attached,
 * and has a non-zero readout clock. */
bool intel_crtc_active(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 */
	return intel_crtc->active && crtc->fb &&
		intel_crtc->config.adjusted_mode.crtc_clock;
}
761
 
3243 Serge 762
/* Map a hardware pipe to the CPU transcoder currently configured for it. */
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum pipe pipe)
{
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return intel_crtc->config.cpu_transcoder;
}
770
 
4560 Serge 771
/* Wait (up to 50ms) for the pipe's hardware frame counter to advance,
 * i.e. for at least one vblank to pass.  G4x+ only. */
static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);

	frame = I915_READ(frame_reg);

	if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}
781
 
2327 Serge 782
/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
 * mode setting code.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipestat_reg = PIPESTAT(pipe);

	/* G4x and newer have a frame counter; use that instead of polling
	 * the sticky status bit. */
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		g4x_wait_for_vblank(dev, pipe);
		return;
	}

	/* Clear existing vblank status. Note this will clear any other
	 * sticky status fields as well.
	 *
	 * This races with i915_driver_irq_handler() with the result
	 * that either function could miss a vblank event.  Here it is not
	 * fatal, as we will either wait upon the next vblank interrupt or
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
	 * called during modeset at which time the GPU should be idle and
	 * should *not* be performing page flips and thus not waiting on
	 * vblanks...
	 * Currently, the result of us stealing a vblank from the irq
	 * handler is that a single frame will be skipped during swapbuffers.
	 */
	I915_WRITE(pipestat_reg,
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

	/* Wait for vblank interrupt bit to set */
	if (wait_for(I915_READ(pipestat_reg) &
		     PIPE_VBLANK_INTERRUPT_STATUS,
		     50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}
822
 
4560 Serge 823
static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
824
{
825
	struct drm_i915_private *dev_priv = dev->dev_private;
826
	u32 reg = PIPEDSL(pipe);
827
	u32 line1, line2;
828
	u32 line_mask;
829
 
830
	if (IS_GEN2(dev))
831
		line_mask = DSL_LINEMASK_GEN2;
832
	else
833
		line_mask = DSL_LINEMASK_GEN3;
834
 
835
	line1 = I915_READ(reg) & line_mask;
836
	mdelay(5);
837
	line2 = I915_READ(reg) & line_mask;
838
 
839
	return line1 == line2;
840
}
841
 
2327 Serge 842
/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		/* Wait for the display line to settle */
		if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
			WARN(1, "pipe_off wait timed out\n");
	}
}
878
 
3480 Serge 879
/*
 * ibx_digital_port_connected - is the specified port connected?
 * @dev_priv: i915 private structure
 * @port: the port to test
 *
 * Returns true if @port is connected, false otherwise.
 */
bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
				struct intel_digital_port *port)
{
	u32 bit;

	/* IBX and CPT/PPT encode per-port hotplug status with different
	 * SDEISR bits; ports without a bit are reported as connected. */
	if (HAS_PCH_IBX(dev_priv->dev)) {
		switch(port->port) {
		case PORT_B:
			bit = SDE_PORTB_HOTPLUG;
			break;
		case PORT_C:
			bit = SDE_PORTC_HOTPLUG;
			break;
		case PORT_D:
			bit = SDE_PORTD_HOTPLUG;
			break;
		default:
			return true;
		}
	} else {
		switch(port->port) {
		case PORT_B:
			bit = SDE_PORTB_HOTPLUG_CPT;
			break;
		case PORT_C:
			bit = SDE_PORTC_HOTPLUG_CPT;
			break;
		case PORT_D:
			bit = SDE_PORTD_HOTPLUG_CPT;
			break;
		default:
			return true;
		}
	}

	return I915_READ(SDEISR) & bit;
}
923
 
2327 Serge 924
/* Human-readable name for an on/off state, used in assertion messages. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";
	return "off";
}
928
 
929
/* Only for pre-ILK configs */
4104 Serge 930
/* Assert that the pipe's DPLL is in the expected on/off state.
 * Only valid for pre-ILK configs. */
void assert_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 dpll = I915_READ(DPLL(pipe));
	bool enabled = !!(dpll & DPLL_VCO_ENABLE);

	WARN(enabled != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(enabled));
}
944
 
4560 Serge 945
/* XXX: the dsi pll is shared between MIPI DSI ports */
946
static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
947
{
948
	u32 val;
949
	bool cur_state;
950
 
951
	mutex_lock(&dev_priv->dpio_lock);
952
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
953
	mutex_unlock(&dev_priv->dpio_lock);
954
 
955
	cur_state = val & DSI_PLL_VCO_EN;
956
	WARN(cur_state != state,
957
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
958
	     state_string(state), state_string(cur_state));
959
}
960
#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
961
#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
962
 
4104 Serge 963
struct intel_shared_dpll *
964
intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
965
{
966
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
967
 
968
	if (crtc->config.shared_dpll < 0)
969
		return NULL;
970
 
971
	return &dev_priv->shared_dplls[crtc->config.shared_dpll];
972
}
973
 
2327 Serge 974
/* For ILK+ */
4104 Serge 975
void assert_shared_dpll(struct drm_i915_private *dev_priv,
976
			       struct intel_shared_dpll *pll,
3031 serge 977
			   bool state)
2327 Serge 978
{
979
	bool cur_state;
4104 Serge 980
	struct intel_dpll_hw_state hw_state;
2327 Serge 981
 
3031 serge 982
	if (HAS_PCH_LPT(dev_priv->dev)) {
983
		DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
984
		return;
985
	}
2342 Serge 986
 
3031 serge 987
	if (WARN (!pll,
4104 Serge 988
		  "asserting DPLL %s with no DPLL\n", state_string(state)))
3031 serge 989
		return;
2342 Serge 990
 
4104 Serge 991
	cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
3031 serge 992
	WARN(cur_state != state,
4104 Serge 993
	     "%s assertion failure (expected %s, current %s)\n",
994
	     pll->name, state_string(state), state_string(cur_state));
2327 Serge 995
}
996
 
997
/* Assert the FDI transmitter enable state for @pipe. */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	bool enabled;
	u32 val;

	if (HAS_DDI(dev_priv->dev)) {
		/* DDI does not have a specific FDI_TX register; the DDI
		 * function-control enable bit plays the same role. */
		val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		enabled = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		val = I915_READ(FDI_TX_CTL(pipe));
		enabled = !!(val & FDI_TX_ENABLE);
	}
	WARN(enabled != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(enabled));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1022
 
1023
/* Assert the FDI receiver enable state for @pipe. */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val = I915_READ(FDI_RX_CTL(pipe));
	bool enabled = !!(val & FDI_RX_ENABLE);

	WARN(enabled != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(enabled));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1039
 
1040
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1041
				      enum pipe pipe)
1042
{
1043
	int reg;
1044
	u32 val;
1045
 
1046
	/* ILK FDI PLL is always enabled */
1047
	if (dev_priv->info->gen == 5)
1048
		return;
1049
 
3031 serge 1050
	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
3480 Serge 1051
	if (HAS_DDI(dev_priv->dev))
3031 serge 1052
		return;
1053
 
2327 Serge 1054
	reg = FDI_TX_CTL(pipe);
1055
	val = I915_READ(reg);
1056
	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1057
}
1058
 
4104 Serge 1059
/* Assert the FDI RX PLL on/off state for @pipe. */
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val = I915_READ(FDI_RX_CTL(pipe));
	bool enabled = !!(val & FDI_RX_PLL_ENABLE);

	WARN(enabled != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(enabled));
}
1073
 
1074
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1075
				  enum pipe pipe)
1076
{
1077
	int pp_reg, lvds_reg;
1078
	u32 val;
1079
	enum pipe panel_pipe = PIPE_A;
1080
	bool locked = true;
1081
 
1082
	if (HAS_PCH_SPLIT(dev_priv->dev)) {
1083
		pp_reg = PCH_PP_CONTROL;
1084
		lvds_reg = PCH_LVDS;
1085
	} else {
1086
		pp_reg = PP_CONTROL;
1087
		lvds_reg = LVDS;
1088
	}
1089
 
1090
	val = I915_READ(pp_reg);
1091
	if (!(val & PANEL_POWER_ON) ||
1092
	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
1093
		locked = false;
1094
 
1095
	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
1096
		panel_pipe = PIPE_B;
1097
 
1098
	WARN(panel_pipe == pipe && locked,
1099
	     "panel assertion failure, pipe %c regs locked\n",
1100
	     pipe_name(pipe));
1101
}
1102
 
4560 Serge 1103
/* Assert the hardware cursor enable state for @pipe. The cursor control
 * register layout differs per generation. */
static void assert_cursor(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	struct drm_device *dev = dev_priv->dev;
	bool enabled;

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
		enabled = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
	else if (IS_845G(dev) || IS_I865G(dev))
		enabled = I915_READ(_CURACNTR) & CURSOR_ENABLE;
	else
		enabled = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

	WARN(enabled != state,
	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(enabled));
}
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1122
 
2342 Serge 1123
void assert_pipe(struct drm_i915_private *dev_priv,
			enum pipe pipe, bool state)
{
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	bool enabled;

	/* if we need the pipe A quirk it must be always on */
	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
		state = true;

	if (!intel_display_power_enabled(dev_priv->dev,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
		/* Powered-down transcoder: treat as disabled, don't touch
		 * the register. */
		enabled = false;
	} else {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		enabled = !!(val & PIPECONF_ENABLE);
	}

	WARN(enabled != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(enabled));
}
1149
 
3031 serge 1150
/* Assert the display plane enable state for @plane. */
static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	u32 val = I915_READ(DSPCNTR(plane));
	bool enabled = !!(val & DISPLAY_PLANE_ENABLE);

	WARN(enabled != state,
	     "plane %c assertion failure (expected %s, current %s)\n",
	     plane_name(plane), state_string(state), state_string(enabled));
}

#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)
1167
 
2327 Serge 1168
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1169
				   enum pipe pipe)
1170
{
4104 Serge 1171
	struct drm_device *dev = dev_priv->dev;
2327 Serge 1172
	int reg, i;
1173
	u32 val;
1174
	int cur_pipe;
1175
 
4104 Serge 1176
	/* Primary planes are fixed to pipes on gen4+ */
1177
	if (INTEL_INFO(dev)->gen >= 4) {
3031 serge 1178
		reg = DSPCNTR(pipe);
1179
		val = I915_READ(reg);
1180
		WARN((val & DISPLAY_PLANE_ENABLE),
1181
		     "plane %c assertion failure, should be disabled but not\n",
1182
		     plane_name(pipe));
2327 Serge 1183
		return;
3031 serge 1184
	}
2327 Serge 1185
 
1186
	/* Need to check both planes against the pipe */
4104 Serge 1187
	for_each_pipe(i) {
2327 Serge 1188
		reg = DSPCNTR(i);
1189
		val = I915_READ(reg);
1190
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1191
			DISPPLANE_SEL_PIPE_SHIFT;
1192
		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1193
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
1194
		     plane_name(i), pipe_name(pipe));
1195
	}
1196
}
1197
 
3746 Serge 1198
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1199
				    enum pipe pipe)
1200
{
4104 Serge 1201
	struct drm_device *dev = dev_priv->dev;
3746 Serge 1202
	int reg, i;
1203
	u32 val;
1204
 
4104 Serge 1205
	if (IS_VALLEYVIEW(dev)) {
3746 Serge 1206
	for (i = 0; i < dev_priv->num_plane; i++) {
1207
		reg = SPCNTR(pipe, i);
1208
		val = I915_READ(reg);
1209
		WARN((val & SP_ENABLE),
4104 Serge 1210
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1211
			     sprite_name(pipe, i), pipe_name(pipe));
1212
		}
1213
	} else if (INTEL_INFO(dev)->gen >= 7) {
1214
		reg = SPRCTL(pipe);
1215
		val = I915_READ(reg);
1216
		WARN((val & SPRITE_ENABLE),
1217
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1218
		     plane_name(pipe), pipe_name(pipe));
1219
	} else if (INTEL_INFO(dev)->gen >= 5) {
1220
		reg = DVSCNTR(pipe);
1221
		val = I915_READ(reg);
1222
		WARN((val & DVS_ENABLE),
1223
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1224
		     plane_name(pipe), pipe_name(pipe));
3746 Serge 1225
	}
1226
}
1227
 
4560 Serge 1228
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
2327 Serge 1229
{
1230
	u32 val;
1231
	bool enabled;
1232
 
4560 Serge 1233
	WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
3031 serge 1234
 
2327 Serge 1235
	val = I915_READ(PCH_DREF_CONTROL);
1236
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1237
			    DREF_SUPERSPREAD_SOURCE_MASK));
1238
	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1239
}
1240
 
4104 Serge 1241
static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
2327 Serge 1242
				       enum pipe pipe)
1243
{
1244
	int reg;
1245
	u32 val;
1246
	bool enabled;
1247
 
4104 Serge 1248
	reg = PCH_TRANSCONF(pipe);
2327 Serge 1249
	val = I915_READ(reg);
1250
	enabled = !!(val & TRANS_ENABLE);
1251
	WARN(enabled,
1252
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
1253
	     pipe_name(pipe));
1254
}
1255
 
1256
/* Is the given PCH DP port (control value @val) routed to @pipe? */
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum pipe pipe, u32 port_sel, u32 val)
{
	if (!(val & DP_PORT_EN))
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		/* CPT routes DP via the transcoder DP control register. */
		u32 trans_dp = I915_READ(TRANS_DP_CTL(pipe));

		return (trans_dp & TRANS_DP_PORT_SEL_MASK) == port_sel;
	}

	return (val & DP_PIPE_MASK) == (pipe << 30);
}
1273
 
1274
/* Is the given PCH HDMI/SDVO port (control value @val) routed to @pipe? */
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if (!(val & SDVO_ENABLE))
		return false;

	if (HAS_PCH_CPT(dev_priv->dev))
		return (val & SDVO_PIPE_SEL_MASK_CPT) == SDVO_PIPE_SEL_CPT(pipe);

	return (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(pipe);
}
1289
 
1290
/* Is the PCH LVDS port (control value @val) routed to @pipe? */
static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if (!(val & LVDS_PORT_EN))
		return false;

	if (HAS_PCH_CPT(dev_priv->dev))
		return (val & PORT_TRANS_SEL_MASK) == PORT_TRANS_SEL_CPT(pipe);

	return (val & LVDS_PIPE_MASK) == LVDS_PIPE(pipe);
}
1305
 
1306
/* Is the PCH VGA DAC (control value @val) routed to @pipe? */
static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if (!(val & ADPA_DAC_ENABLE))
		return false;

	if (HAS_PCH_CPT(dev_priv->dev))
		return (val & PORT_TRANS_SEL_MASK) == PORT_TRANS_SEL_CPT(pipe);

	return (val & ADPA_PIPE_SELECT_MASK) == ADPA_PIPE_SELECT(pipe);
}
1320
 
1321
/* Warn if the PCH DP port @reg is still feeding @pipe, or still parked on
 * transcoder B (IBX restriction). */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);

	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));

	WARN(HAS_PCH_IBX(dev_priv->dev) && !(val & DP_PORT_EN)
	     && (val & DP_PIPEB_SELECT),
	     "IBX PCH dp port still using transcoder B\n");
}
1333
 
1334
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1335
				     enum pipe pipe, int reg)
1336
{
1337
	u32 val = I915_READ(reg);
3031 serge 1338
	WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1339
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
2327 Serge 1340
	     reg, pipe_name(pipe));
3031 serge 1341
 
3746 Serge 1342
	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
3031 serge 1343
	     && (val & SDVO_PIPE_B_SELECT),
1344
	     "IBX PCH hdmi port still using transcoder B\n");
2327 Serge 1345
}
1346
 
1347
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1348
				      enum pipe pipe)
1349
{
1350
	int reg;
1351
	u32 val;
1352
 
1353
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1354
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1355
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1356
 
1357
	reg = PCH_ADPA;
1358
	val = I915_READ(reg);
3031 serge 1359
	WARN(adpa_pipe_enabled(dev_priv, pipe, val),
2327 Serge 1360
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
1361
	     pipe_name(pipe));
1362
 
1363
	reg = PCH_LVDS;
1364
	val = I915_READ(reg);
3031 serge 1365
	WARN(lvds_pipe_enabled(dev_priv, pipe, val),
2327 Serge 1366
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
1367
	     pipe_name(pipe));
1368
 
3746 Serge 1369
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1370
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1371
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
2327 Serge 1372
}
1373
 
4560 Serge 1374
static void intel_init_dpio(struct drm_device *dev)
1375
{
1376
	struct drm_i915_private *dev_priv = dev->dev_private;
1377
 
1378
	if (!IS_VALLEYVIEW(dev))
1379
		return;
1380
 
1381
	DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
1382
}
1383
 
1384
static void intel_reset_dpio(struct drm_device *dev)
1385
{
1386
	struct drm_i915_private *dev_priv = dev->dev_private;
1387
 
1388
	if (!IS_VALLEYVIEW(dev))
1389
		return;
1390
 
1391
	/*
1392
	 * Enable the CRI clock source so we can get at the display and the
1393
	 * reference clock for VGA hotplug / manual detection.
1394
	 */
1395
	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
1396
		   DPLL_REFA_CLK_ENABLE_VLV |
1397
		   DPLL_INTEGRATED_CRI_CLK_VLV);
1398
 
1399
	/*
1400
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1401
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
1402
	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
1403
	 *   b.	The other bits such as sfr settings / modesel may all be set
1404
	 *      to 0.
1405
	 *
1406
	 * This should only be done on init and resume from S3 with both
1407
	 * PLLs disabled, or we risk losing DPIO and PLL synchronization.
1408
	 */
1409
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
1410
}
1411
 
4104 Serge 1412
static void vlv_enable_pll(struct intel_crtc *crtc)
2327 Serge 1413
{
4104 Serge 1414
	struct drm_device *dev = crtc->base.dev;
1415
	struct drm_i915_private *dev_priv = dev->dev_private;
1416
	int reg = DPLL(crtc->pipe);
1417
	u32 dpll = crtc->config.dpll_hw_state.dpll;
2327 Serge 1418
 
4104 Serge 1419
	assert_pipe_disabled(dev_priv, crtc->pipe);
1420
 
2327 Serge 1421
    /* No really, not for ILK+ */
4104 Serge 1422
	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
2327 Serge 1423
 
1424
    /* PLL is protected by panel, make sure we can write it */
1425
    if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
4104 Serge 1426
		assert_panel_unlocked(dev_priv, crtc->pipe);
2327 Serge 1427
 
4104 Serge 1428
	I915_WRITE(reg, dpll);
1429
	POSTING_READ(reg);
1430
	udelay(150);
2327 Serge 1431
 
4104 Serge 1432
	if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1433
		DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
1434
 
1435
	I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
1436
	POSTING_READ(DPLL_MD(crtc->pipe));
1437
 
1438
	/* We do this three times for luck */
1439
	I915_WRITE(reg, dpll);
1440
	POSTING_READ(reg);
1441
	udelay(150); /* wait for warmup */
1442
	I915_WRITE(reg, dpll);
1443
	POSTING_READ(reg);
1444
	udelay(150); /* wait for warmup */
1445
	I915_WRITE(reg, dpll);
1446
	POSTING_READ(reg);
1447
	udelay(150); /* wait for warmup */
1448
}
1449
 
1450
static void i9xx_enable_pll(struct intel_crtc *crtc)
1451
{
1452
	struct drm_device *dev = crtc->base.dev;
1453
	struct drm_i915_private *dev_priv = dev->dev_private;
1454
	int reg = DPLL(crtc->pipe);
1455
	u32 dpll = crtc->config.dpll_hw_state.dpll;
1456
 
1457
	assert_pipe_disabled(dev_priv, crtc->pipe);
1458
 
1459
	/* No really, not for ILK+ */
1460
	BUG_ON(dev_priv->info->gen >= 5);
1461
 
1462
	/* PLL is protected by panel, make sure we can write it */
1463
	if (IS_MOBILE(dev) && !IS_I830(dev))
1464
		assert_panel_unlocked(dev_priv, crtc->pipe);
1465
 
1466
	I915_WRITE(reg, dpll);
1467
 
1468
	/* Wait for the clocks to stabilize. */
1469
	POSTING_READ(reg);
1470
	udelay(150);
1471
 
1472
	if (INTEL_INFO(dev)->gen >= 4) {
1473
		I915_WRITE(DPLL_MD(crtc->pipe),
1474
			   crtc->config.dpll_hw_state.dpll_md);
1475
	} else {
1476
		/* The pixel multiplier can only be updated once the
1477
		 * DPLL is enabled and the clocks are stable.
1478
		 *
1479
		 * So write it again.
1480
		 */
1481
		I915_WRITE(reg, dpll);
1482
	}
1483
 
2327 Serge 1484
    /* We do this three times for luck */
4104 Serge 1485
	I915_WRITE(reg, dpll);
2327 Serge 1486
    POSTING_READ(reg);
1487
    udelay(150); /* wait for warmup */
4104 Serge 1488
	I915_WRITE(reg, dpll);
2327 Serge 1489
    POSTING_READ(reg);
1490
    udelay(150); /* wait for warmup */
4104 Serge 1491
	I915_WRITE(reg, dpll);
2327 Serge 1492
    POSTING_READ(reg);
1493
    udelay(150); /* wait for warmup */
1494
}
1495
 
1496
/**
4104 Serge 1497
 * i9xx_disable_pll - disable a PLL
2327 Serge 1498
 * @dev_priv: i915 private structure
1499
 * @pipe: pipe PLL to disable
1500
 *
1501
 * Disable the PLL for @pipe, making sure the pipe is off first.
1502
 *
1503
 * Note!  This is for pre-ILK only.
1504
 */
4104 Serge 1505
static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
2327 Serge 1506
{
1507
	/* Don't disable pipe A or pipe A PLLs if needed */
1508
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1509
		return;
1510
 
1511
	/* Make sure the pipe isn't still relying on us */
1512
	assert_pipe_disabled(dev_priv, pipe);
1513
 
4104 Serge 1514
	I915_WRITE(DPLL(pipe), 0);
1515
	POSTING_READ(DPLL(pipe));
2327 Serge 1516
}
1517
 
4539 Serge 1518
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1519
{
1520
	u32 val = 0;
1521
 
1522
	/* Make sure the pipe isn't still relying on us */
1523
	assert_pipe_disabled(dev_priv, pipe);
1524
 
4560 Serge 1525
	/*
1526
	 * Leave integrated clock source and reference clock enabled for pipe B.
1527
	 * The latter is needed for VGA hotplug / manual detection.
1528
	 */
4539 Serge 1529
	if (pipe == PIPE_B)
4560 Serge 1530
		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
4539 Serge 1531
	I915_WRITE(DPLL(pipe), val);
1532
	POSTING_READ(DPLL(pipe));
1533
}
1534
 
4560 Serge 1535
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1536
		struct intel_digital_port *dport)
3031 serge 1537
{
4104 Serge 1538
	u32 port_mask;
3031 serge 1539
 
4560 Serge 1540
	switch (dport->port) {
1541
	case PORT_B:
4104 Serge 1542
		port_mask = DPLL_PORTB_READY_MASK;
4560 Serge 1543
		break;
1544
	case PORT_C:
4104 Serge 1545
		port_mask = DPLL_PORTC_READY_MASK;
4560 Serge 1546
		break;
1547
	default:
1548
		BUG();
1549
	}
3243 Serge 1550
 
4104 Serge 1551
	if (wait_for((I915_READ(DPLL(0)) & port_mask) == 0, 1000))
1552
		WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
4560 Serge 1553
		     port_name(dport->port), I915_READ(DPLL(0)));
3031 serge 1554
}
1555
 
2327 Serge 1556
/**
4104 Serge 1557
 * ironlake_enable_shared_dpll - enable PCH PLL
2327 Serge 1558
 * @dev_priv: i915 private structure
1559
 * @pipe: pipe PLL to enable
1560
 *
1561
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
1562
 * drives the transcoder clock.
1563
 */
4104 Serge 1564
static void ironlake_enable_shared_dpll(struct intel_crtc *crtc)
2327 Serge 1565
{
4104 Serge 1566
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1567
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
2327 Serge 1568
 
3031 serge 1569
	/* PCH PLLs only available on ILK, SNB and IVB */
1570
	BUG_ON(dev_priv->info->gen < 5);
4104 Serge 1571
	if (WARN_ON(pll == NULL))
2342 Serge 1572
		return;
1573
 
3031 serge 1574
	if (WARN_ON(pll->refcount == 0))
1575
		return;
2327 Serge 1576
 
4104 Serge 1577
	DRM_DEBUG_KMS("enable %s (active %d, on? %d)for crtc %d\n",
1578
		      pll->name, pll->active, pll->on,
1579
		      crtc->base.base.id);
3031 serge 1580
 
4104 Serge 1581
	if (pll->active++) {
1582
		WARN_ON(!pll->on);
1583
		assert_shared_dpll_enabled(dev_priv, pll);
3031 serge 1584
		return;
1585
	}
4104 Serge 1586
	WARN_ON(pll->on);
3031 serge 1587
 
4104 Serge 1588
	DRM_DEBUG_KMS("enabling %s\n", pll->name);
1589
	pll->enable(dev_priv, pll);
3031 serge 1590
	pll->on = true;
2327 Serge 1591
}
1592
 
4104 Serge 1593
static void intel_disable_shared_dpll(struct intel_crtc *crtc)
2327 Serge 1594
{
4104 Serge 1595
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1596
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
2327 Serge 1597
 
1598
	/* PCH only available on ILK+ */
1599
	BUG_ON(dev_priv->info->gen < 5);
4104 Serge 1600
	if (WARN_ON(pll == NULL))
3031 serge 1601
	       return;
2327 Serge 1602
 
3031 serge 1603
	if (WARN_ON(pll->refcount == 0))
1604
		return;
2327 Serge 1605
 
4104 Serge 1606
	DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
1607
		      pll->name, pll->active, pll->on,
1608
		      crtc->base.base.id);
2342 Serge 1609
 
3031 serge 1610
	if (WARN_ON(pll->active == 0)) {
4104 Serge 1611
		assert_shared_dpll_disabled(dev_priv, pll);
3031 serge 1612
		return;
1613
	}
2342 Serge 1614
 
4104 Serge 1615
	assert_shared_dpll_enabled(dev_priv, pll);
1616
	WARN_ON(!pll->on);
1617
	if (--pll->active)
2342 Serge 1618
		return;
1619
 
4104 Serge 1620
	DRM_DEBUG_KMS("disabling %s\n", pll->name);
1621
	pll->disable(dev_priv, pll);
3031 serge 1622
	pll->on = false;
2327 Serge 1623
}
1624
 
3243 Serge 1625
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
2327 Serge 1626
				    enum pipe pipe)
1627
{
3243 Serge 1628
	struct drm_device *dev = dev_priv->dev;
3031 serge 1629
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
4104 Serge 1630
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3243 Serge 1631
	uint32_t reg, val, pipeconf_val;
2327 Serge 1632
 
1633
	/* PCH only available on ILK+ */
1634
	BUG_ON(dev_priv->info->gen < 5);
1635
 
1636
	/* Make sure PCH DPLL is enabled */
4104 Serge 1637
	assert_shared_dpll_enabled(dev_priv,
1638
				   intel_crtc_to_shared_dpll(intel_crtc));
2327 Serge 1639
 
1640
	/* FDI must be feeding us bits for PCH ports */
1641
	assert_fdi_tx_enabled(dev_priv, pipe);
1642
	assert_fdi_rx_enabled(dev_priv, pipe);
1643
 
3243 Serge 1644
	if (HAS_PCH_CPT(dev)) {
1645
		/* Workaround: Set the timing override bit before enabling the
1646
		 * pch transcoder. */
1647
		reg = TRANS_CHICKEN2(pipe);
1648
		val = I915_READ(reg);
1649
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1650
		I915_WRITE(reg, val);
3031 serge 1651
	}
3243 Serge 1652
 
4104 Serge 1653
	reg = PCH_TRANSCONF(pipe);
2327 Serge 1654
	val = I915_READ(reg);
3031 serge 1655
	pipeconf_val = I915_READ(PIPECONF(pipe));
2327 Serge 1656
 
1657
	if (HAS_PCH_IBX(dev_priv->dev)) {
1658
		/*
1659
		 * make the BPC in transcoder be consistent with
1660
		 * that in pipeconf reg.
1661
		 */
3480 Serge 1662
		val &= ~PIPECONF_BPC_MASK;
1663
		val |= pipeconf_val & PIPECONF_BPC_MASK;
2327 Serge 1664
	}
3031 serge 1665
 
1666
	val &= ~TRANS_INTERLACE_MASK;
1667
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1668
		if (HAS_PCH_IBX(dev_priv->dev) &&
1669
		    intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
1670
			val |= TRANS_LEGACY_INTERLACED_ILK;
1671
		else
1672
			val |= TRANS_INTERLACED;
1673
	else
1674
		val |= TRANS_PROGRESSIVE;
1675
 
2327 Serge 1676
	I915_WRITE(reg, val | TRANS_ENABLE);
1677
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
4104 Serge 1678
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
2327 Serge 1679
}
1680
 
3243 Serge 1681
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1682
				      enum transcoder cpu_transcoder)
1683
{
1684
	u32 val, pipeconf_val;
1685
 
1686
	/* PCH only available on ILK+ */
1687
	BUG_ON(dev_priv->info->gen < 5);
1688
 
1689
	/* FDI must be feeding us bits for PCH ports */
3480 Serge 1690
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
3243 Serge 1691
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
1692
 
1693
	/* Workaround: set timing override bit. */
1694
	val = I915_READ(_TRANSA_CHICKEN2);
1695
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1696
	I915_WRITE(_TRANSA_CHICKEN2, val);
1697
 
1698
	val = TRANS_ENABLE;
1699
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1700
 
1701
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1702
	    PIPECONF_INTERLACED_ILK)
1703
		val |= TRANS_INTERLACED;
1704
	else
1705
		val |= TRANS_PROGRESSIVE;
1706
 
4104 Serge 1707
	I915_WRITE(LPT_TRANSCONF, val);
1708
	if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
3243 Serge 1709
		DRM_ERROR("Failed to enable PCH transcoder\n");
1710
}
1711
 
1712
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
2327 Serge 1713
				     enum pipe pipe)
1714
{
3243 Serge 1715
	struct drm_device *dev = dev_priv->dev;
1716
	uint32_t reg, val;
2327 Serge 1717
 
1718
	/* FDI relies on the transcoder */
1719
	assert_fdi_tx_disabled(dev_priv, pipe);
1720
	assert_fdi_rx_disabled(dev_priv, pipe);
1721
 
1722
	/* Ports must be off as well */
1723
	assert_pch_ports_disabled(dev_priv, pipe);
1724
 
4104 Serge 1725
	reg = PCH_TRANSCONF(pipe);
2327 Serge 1726
	val = I915_READ(reg);
1727
	val &= ~TRANS_ENABLE;
1728
	I915_WRITE(reg, val);
1729
	/* wait for PCH transcoder off, transcoder state */
1730
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
4104 Serge 1731
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
3243 Serge 1732
 
1733
	if (!HAS_PCH_IBX(dev)) {
1734
		/* Workaround: Clear the timing override chicken bit again. */
1735
		reg = TRANS_CHICKEN2(pipe);
1736
		val = I915_READ(reg);
1737
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1738
		I915_WRITE(reg, val);
1739
	}
2327 Serge 1740
}
1741
 
3243 Serge 1742
static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1743
{
1744
	u32 val;
1745
 
4104 Serge 1746
	val = I915_READ(LPT_TRANSCONF);
3243 Serge 1747
	val &= ~TRANS_ENABLE;
4104 Serge 1748
	I915_WRITE(LPT_TRANSCONF, val);
3243 Serge 1749
	/* wait for PCH transcoder off, transcoder state */
4104 Serge 1750
	if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
3243 Serge 1751
		DRM_ERROR("Failed to disable PCH transcoder\n");
1752
 
1753
	/* Workaround: clear timing override bit. */
1754
	val = I915_READ(_TRANSA_CHICKEN2);
1755
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1756
	I915_WRITE(_TRANSA_CHICKEN2, val);
1757
}
1758
 
2327 Serge 1759
/**
1760
 * intel_enable_pipe - enable a pipe, asserting requirements
1761
 * @dev_priv: i915 private structure
1762
 * @pipe: pipe to enable
1763
 * @pch_port: on ILK+, is this pipe driving a PCH port or not
1764
 *
1765
 * Enable @pipe, making sure that various hardware specific requirements
1766
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1767
 *
1768
 * @pipe should be %PIPE_A or %PIPE_B.
1769
 *
1770
 * Will wait until the pipe is actually running (i.e. first vblank) before
1771
 * returning.
1772
 */
1773
/* See the kernel-doc comment above: enable @pipe after asserting that its
 * planes/cursor/sprites are off and its clock source is running. */
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
			      bool pch_port, bool dsi)
{
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum pipe pch_transcoder;
	int reg;
	u32 val;

	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	if (HAS_PCH_LPT(dev_priv->dev))
		pch_transcoder = TRANSCODER_A;
	else
		pch_transcoder = pipe;

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (!HAS_PCH_SPLIT(dev_priv->dev)) {
		/* Braces added: the old unbraced if/if/else chain parsed
		 * correctly only via the dangling-else rule and the
		 * assert_pll_enabled() call was mis-indented. */
		if (dsi)
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (pch_port) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE)
		return;

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}
1819
 
1820
/**
1821
 * intel_disable_pipe - disable a pipe, asserting requirements
1822
 * @dev_priv: i915 private structure
1823
 * @pipe: pipe to disable
1824
 *
1825
 * Disable @pipe, making sure that various hardware specific requirements
1826
 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1827
 *
1828
 * @pipe should be %PIPE_A or %PIPE_B.
1829
 *
1830
 * Will wait until the pipe has shut down before returning.
1831
 */
1832
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1833
			       enum pipe pipe)
1834
{
3243 Serge 1835
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1836
								      pipe);
2327 Serge 1837
	int reg;
1838
	u32 val;
1839
 
3031 serge 1840
    /*
2327 Serge 1841
	 * Make sure planes won't keep trying to pump pixels to us,
1842
	 * or we might hang the display.
1843
	 */
1844
	assert_planes_disabled(dev_priv, pipe);
4560 Serge 1845
	assert_cursor_disabled(dev_priv, pipe);
3746 Serge 1846
	assert_sprites_disabled(dev_priv, pipe);
2327 Serge 1847
 
1848
	/* Don't disable pipe A or pipe A PLLs if needed */
1849
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1850
		return;
1851
 
3243 Serge 1852
	reg = PIPECONF(cpu_transcoder);
2327 Serge 1853
	val = I915_READ(reg);
1854
	if ((val & PIPECONF_ENABLE) == 0)
1855
		return;
1856
 
1857
	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
1858
	intel_wait_for_pipe_off(dev_priv->dev, pipe);
1859
}
1860
 
1861
/*
1862
 * Plane regs are double buffered, going from enabled->disabled needs a
1863
 * trigger in order to latch.  The display address reg provides this.
1864
 */
4560 Serge 1865
void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
2327 Serge 1866
				      enum plane plane)
1867
{
4560 Serge 1868
	u32 reg = dev_priv->info->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
1869
 
1870
	I915_WRITE(reg, I915_READ(reg));
1871
	POSTING_READ(reg);
2327 Serge 1872
}
1873
 
1874
/**
4560 Serge 1875
 * intel_enable_primary_plane - enable the primary plane on a given pipe
2327 Serge 1876
 * @dev_priv: i915 private structure
1877
 * @plane: plane to enable
1878
 * @pipe: pipe being fed
1879
 *
1880
 * Enable @plane on @pipe, making sure that @pipe is running first.
1881
 */
4560 Serge 1882
static void intel_enable_primary_plane(struct drm_i915_private *dev_priv,
2327 Serge 1883
			       enum plane plane, enum pipe pipe)
1884
{
4560 Serge 1885
	struct intel_crtc *intel_crtc =
1886
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
2327 Serge 1887
	int reg;
1888
	u32 val;
1889
 
1890
	/* If the pipe isn't enabled, we can't pump pixels and may hang */
1891
	assert_pipe_enabled(dev_priv, pipe);
1892
 
4560 Serge 1893
	WARN(intel_crtc->primary_enabled, "Primary plane already enabled\n");
1894
 
1895
	intel_crtc->primary_enabled = true;
1896
 
2327 Serge 1897
	reg = DSPCNTR(plane);
1898
	val = I915_READ(reg);
1899
	if (val & DISPLAY_PLANE_ENABLE)
1900
		return;
1901
 
1902
	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
4560 Serge 1903
	intel_flush_primary_plane(dev_priv, plane);
2327 Serge 1904
	intel_wait_for_vblank(dev_priv->dev, pipe);
1905
}
1906
 
1907
/**
4560 Serge 1908
 * intel_disable_primary_plane - disable the primary plane
2327 Serge 1909
 * @dev_priv: i915 private structure
1910
 * @plane: plane to disable
1911
 * @pipe: pipe consuming the data
1912
 *
1913
 * Disable @plane; should be an independent operation.
1914
 */
4560 Serge 1915
static void intel_disable_primary_plane(struct drm_i915_private *dev_priv,
2327 Serge 1916
				enum plane plane, enum pipe pipe)
1917
{
4560 Serge 1918
	struct intel_crtc *intel_crtc =
1919
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
2327 Serge 1920
	int reg;
1921
	u32 val;
1922
 
4560 Serge 1923
	WARN(!intel_crtc->primary_enabled, "Primary plane already disabled\n");
1924
 
1925
	intel_crtc->primary_enabled = false;
1926
 
2327 Serge 1927
	reg = DSPCNTR(plane);
1928
	val = I915_READ(reg);
1929
	if ((val & DISPLAY_PLANE_ENABLE) == 0)
1930
		return;
1931
 
1932
	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
4560 Serge 1933
	intel_flush_primary_plane(dev_priv, plane);
3031 serge 1934
    intel_wait_for_vblank(dev_priv->dev, pipe);
2327 Serge 1935
}
1936
 
3746 Serge 1937
static bool need_vtd_wa(struct drm_device *dev)
1938
{
1939
#ifdef CONFIG_INTEL_IOMMU
1940
	if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
1941
		return true;
1942
#endif
1943
	return false;
1944
}
1945
 
2335 Serge 1946
int
1947
intel_pin_and_fence_fb_obj(struct drm_device *dev,
1948
			   struct drm_i915_gem_object *obj,
1949
			   struct intel_ring_buffer *pipelined)
1950
{
1951
	struct drm_i915_private *dev_priv = dev->dev_private;
1952
	u32 alignment;
1953
	int ret;
2327 Serge 1954
 
2335 Serge 1955
	switch (obj->tiling_mode) {
1956
	case I915_TILING_NONE:
1957
		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1958
			alignment = 128 * 1024;
1959
		else if (INTEL_INFO(dev)->gen >= 4)
1960
			alignment = 4 * 1024;
1961
		else
1962
			alignment = 64 * 1024;
1963
		break;
1964
	case I915_TILING_X:
1965
		/* pin() will align the object as required by fence */
1966
		alignment = 0;
1967
		break;
1968
	case I915_TILING_Y:
4560 Serge 1969
		WARN(1, "Y tiled bo slipped through, driver bug!\n");
2335 Serge 1970
		return -EINVAL;
1971
	default:
1972
		BUG();
1973
	}
2327 Serge 1974
 
3746 Serge 1975
	/* Note that the w/a also requires 64 PTE of padding following the
1976
	 * bo. We currently fill all unused PTE with the shadow page and so
1977
	 * we should always have valid PTE following the scanout preventing
1978
	 * the VT-d warning.
1979
	 */
1980
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
1981
		alignment = 256 * 1024;
1982
 
2335 Serge 1983
	dev_priv->mm.interruptible = false;
1984
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
1985
	if (ret)
1986
		goto err_interruptible;
2327 Serge 1987
 
2335 Serge 1988
	/* Install a fence for tiled scan-out. Pre-i965 always needs a
1989
	 * fence, whereas 965+ only requires a fence if using
1990
	 * framebuffer compression.  For simplicity, we always install
1991
	 * a fence as the cost is not that onerous.
1992
	 */
3480 Serge 1993
	ret = i915_gem_object_get_fence(obj);
1994
	if (ret)
1995
		goto err_unpin;
2327 Serge 1996
 
3480 Serge 1997
	i915_gem_object_pin_fence(obj);
1998
 
2335 Serge 1999
	dev_priv->mm.interruptible = true;
2000
	return 0;
2327 Serge 2001
 
2335 Serge 2002
err_unpin:
4104 Serge 2003
	i915_gem_object_unpin_from_display_plane(obj);
2335 Serge 2004
err_interruptible:
2005
	dev_priv->mm.interruptible = true;
2006
	return ret;
2007
}
2327 Serge 2008
 
3031 serge 2009
/*
 * intel_unpin_fb_obj - release a framebuffer object's scanout pin/fence
 *
 * Deliberately a no-op in this port: the upstream unpin calls are kept
 * below (commented out) for reference, so scanout objects stay pinned.
 */
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
{
//	i915_gem_object_unpin_fence(obj);
//	i915_gem_object_unpin(obj);
}
2014
 
2015
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
2016
 * is assumed to be a power-of-two. */
3480 Serge 2017
unsigned long intel_gen4_compute_page_offset(int *x, int *y,
2018
					     unsigned int tiling_mode,
2019
					     unsigned int cpp,
3031 serge 2020
							unsigned int pitch)
2021
{
3480 Serge 2022
	if (tiling_mode != I915_TILING_NONE) {
2023
		unsigned int tile_rows, tiles;
3031 serge 2024
 
2025
	tile_rows = *y / 8;
2026
	*y %= 8;
2027
 
3480 Serge 2028
		tiles = *x / (512/cpp);
2029
		*x %= 512/cpp;
2030
 
3031 serge 2031
	return tile_rows * pitch * 8 + tiles * 4096;
3480 Serge 2032
	} else {
2033
		unsigned int offset;
2034
 
2035
		offset = *y * pitch + *x * cpp;
2036
		*y = 0;
2037
		*x = (offset & 4095) / cpp;
2038
		return offset & -4096;
2039
	}
3031 serge 2040
}
2041
 
2327 Serge 2042
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2043
                 int x, int y)
2044
{
2045
    struct drm_device *dev = crtc->dev;
2046
    struct drm_i915_private *dev_priv = dev->dev_private;
2047
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2048
    struct intel_framebuffer *intel_fb;
2049
    struct drm_i915_gem_object *obj;
2050
    int plane = intel_crtc->plane;
3031 serge 2051
	unsigned long linear_offset;
2327 Serge 2052
    u32 dspcntr;
2053
    u32 reg;
2054
 
2055
    switch (plane) {
2056
    case 0:
2057
    case 1:
2058
        break;
2059
    default:
4104 Serge 2060
		DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
2327 Serge 2061
        return -EINVAL;
2062
    }
2063
 
2064
    intel_fb = to_intel_framebuffer(fb);
2065
    obj = intel_fb->obj;
2066
 
2067
    reg = DSPCNTR(plane);
2068
    dspcntr = I915_READ(reg);
2069
    /* Mask out pixel format bits in case we change it */
2070
    dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
3243 Serge 2071
	switch (fb->pixel_format) {
2072
	case DRM_FORMAT_C8:
2327 Serge 2073
        dspcntr |= DISPPLANE_8BPP;
2074
        break;
3243 Serge 2075
	case DRM_FORMAT_XRGB1555:
2076
	case DRM_FORMAT_ARGB1555:
2077
		dspcntr |= DISPPLANE_BGRX555;
2078
		break;
2079
	case DRM_FORMAT_RGB565:
2080
		dspcntr |= DISPPLANE_BGRX565;
2081
		break;
2082
	case DRM_FORMAT_XRGB8888:
2083
	case DRM_FORMAT_ARGB8888:
2084
		dspcntr |= DISPPLANE_BGRX888;
2085
		break;
2086
	case DRM_FORMAT_XBGR8888:
2087
	case DRM_FORMAT_ABGR8888:
2088
		dspcntr |= DISPPLANE_RGBX888;
2089
		break;
2090
	case DRM_FORMAT_XRGB2101010:
2091
	case DRM_FORMAT_ARGB2101010:
2092
		dspcntr |= DISPPLANE_BGRX101010;
2327 Serge 2093
        break;
3243 Serge 2094
	case DRM_FORMAT_XBGR2101010:
2095
	case DRM_FORMAT_ABGR2101010:
2096
		dspcntr |= DISPPLANE_RGBX101010;
2327 Serge 2097
        break;
2098
    default:
3746 Serge 2099
		BUG();
2327 Serge 2100
    }
3243 Serge 2101
 
2327 Serge 2102
    if (INTEL_INFO(dev)->gen >= 4) {
2103
        if (obj->tiling_mode != I915_TILING_NONE)
2104
            dspcntr |= DISPPLANE_TILED;
2105
        else
2106
            dspcntr &= ~DISPPLANE_TILED;
2107
    }
2108
 
4104 Serge 2109
	if (IS_G4X(dev))
2110
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2111
 
2327 Serge 2112
    I915_WRITE(reg, dspcntr);
2113
 
3031 serge 2114
	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2327 Serge 2115
 
3031 serge 2116
	if (INTEL_INFO(dev)->gen >= 4) {
2117
		intel_crtc->dspaddr_offset =
3480 Serge 2118
			intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
3031 serge 2119
							   fb->bits_per_pixel / 8,
2120
							   fb->pitches[0]);
2121
		linear_offset -= intel_crtc->dspaddr_offset;
2122
	} else {
2123
		intel_crtc->dspaddr_offset = linear_offset;
2124
	}
2125
 
4104 Serge 2126
	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2127
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2128
		      fb->pitches[0]);
2342 Serge 2129
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2327 Serge 2130
    if (INTEL_INFO(dev)->gen >= 4) {
4560 Serge 2131
		I915_WRITE(DSPSURF(plane),
4104 Serge 2132
				     i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2327 Serge 2133
        I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
3031 serge 2134
		I915_WRITE(DSPLINOFF(plane), linear_offset);
2327 Serge 2135
    } else
4104 Serge 2136
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
2327 Serge 2137
    POSTING_READ(reg);
2138
 
2139
    return 0;
2140
}
2141
 
2142
static int ironlake_update_plane(struct drm_crtc *crtc,
2143
                 struct drm_framebuffer *fb, int x, int y)
2144
{
2145
    struct drm_device *dev = crtc->dev;
2146
    struct drm_i915_private *dev_priv = dev->dev_private;
2147
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2148
    struct intel_framebuffer *intel_fb;
2149
    struct drm_i915_gem_object *obj;
2150
    int plane = intel_crtc->plane;
3031 serge 2151
	unsigned long linear_offset;
2327 Serge 2152
    u32 dspcntr;
2153
    u32 reg;
2154
 
2155
    switch (plane) {
2156
    case 0:
2157
    case 1:
2342 Serge 2158
	case 2:
2327 Serge 2159
        break;
2160
    default:
4104 Serge 2161
		DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
2327 Serge 2162
        return -EINVAL;
2163
    }
2164
 
2165
    intel_fb = to_intel_framebuffer(fb);
2166
    obj = intel_fb->obj;
2167
 
2168
    reg = DSPCNTR(plane);
2169
    dspcntr = I915_READ(reg);
2170
    /* Mask out pixel format bits in case we change it */
2171
    dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
3243 Serge 2172
	switch (fb->pixel_format) {
2173
	case DRM_FORMAT_C8:
2327 Serge 2174
        dspcntr |= DISPPLANE_8BPP;
2175
        break;
3243 Serge 2176
	case DRM_FORMAT_RGB565:
2177
		dspcntr |= DISPPLANE_BGRX565;
2327 Serge 2178
        break;
3243 Serge 2179
	case DRM_FORMAT_XRGB8888:
2180
	case DRM_FORMAT_ARGB8888:
2181
		dspcntr |= DISPPLANE_BGRX888;
2182
		break;
2183
	case DRM_FORMAT_XBGR8888:
2184
	case DRM_FORMAT_ABGR8888:
2185
		dspcntr |= DISPPLANE_RGBX888;
2186
		break;
2187
	case DRM_FORMAT_XRGB2101010:
2188
	case DRM_FORMAT_ARGB2101010:
2189
		dspcntr |= DISPPLANE_BGRX101010;
2190
		break;
2191
	case DRM_FORMAT_XBGR2101010:
2192
	case DRM_FORMAT_ABGR2101010:
2193
		dspcntr |= DISPPLANE_RGBX101010;
2327 Serge 2194
        break;
2195
    default:
3746 Serge 2196
		BUG();
2327 Serge 2197
    }
2198
 
3480 Serge 2199
	if (obj->tiling_mode != I915_TILING_NONE)
2200
		dspcntr |= DISPPLANE_TILED;
2201
	else
2327 Serge 2202
        dspcntr &= ~DISPPLANE_TILED;
2203
 
4560 Serge 2204
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4104 Serge 2205
		dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
2206
	else
2327 Serge 2207
    dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2208
 
2209
    I915_WRITE(reg, dspcntr);
2210
 
3031 serge 2211
	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2212
	intel_crtc->dspaddr_offset =
3480 Serge 2213
		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
3031 serge 2214
						   fb->bits_per_pixel / 8,
2215
						   fb->pitches[0]);
2216
	linear_offset -= intel_crtc->dspaddr_offset;
2327 Serge 2217
 
4104 Serge 2218
	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2219
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2220
		      fb->pitches[0]);
2342 Serge 2221
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
4560 Serge 2222
	I915_WRITE(DSPSURF(plane),
4104 Serge 2223
			     i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
4560 Serge 2224
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3243 Serge 2225
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2226
	} else {
2330 Serge 2227
	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
3031 serge 2228
	I915_WRITE(DSPLINOFF(plane), linear_offset);
3243 Serge 2229
	}
2330 Serge 2230
	POSTING_READ(reg);
2327 Serge 2231
 
2232
    return 0;
2233
}
2234
 
2235
/* Assume fb object is pinned & idle & fenced and just update base pointers */
2236
static int
2237
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2238
			   int x, int y, enum mode_set_atomic state)
2239
{
2240
	struct drm_device *dev = crtc->dev;
2241
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 2242
 
2243
	if (dev_priv->display.disable_fbc)
2244
		dev_priv->display.disable_fbc(dev);
2245
	intel_increase_pllclock(crtc);
2246
 
2247
	return dev_priv->display.update_plane(crtc, fb, x, y);
2248
}
2249
 
2250
#if 0
4104 Serge 2251
void intel_display_handle_reset(struct drm_device *dev)
2252
{
2253
	struct drm_i915_private *dev_priv = dev->dev_private;
2254
	struct drm_crtc *crtc;
2255
 
2256
	/*
2257
	 * Flips in the rings have been nuked by the reset,
2258
	 * so complete all pending flips so that user space
2259
	 * will get its events and not get stuck.
2260
	 *
2261
	 * Also update the base address of all primary
2262
	 * planes to the the last fb to make sure we're
2263
	 * showing the correct fb after a reset.
2264
	 *
2265
	 * Need to make two loops over the crtcs so that we
2266
	 * don't try to grab a crtc mutex before the
2267
	 * pending_flip_queue really got woken up.
2268
	 */
2269
 
2270
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2271
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2272
		enum plane plane = intel_crtc->plane;
2273
 
2274
		intel_prepare_page_flip(dev, plane);
2275
		intel_finish_page_flip_plane(dev, plane);
2276
	}
2277
 
2278
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2279
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2280
 
2281
		mutex_lock(&crtc->mutex);
4560 Serge 2282
		/*
2283
		 * FIXME: Once we have proper support for primary planes (and
2284
		 * disabling them without disabling the entire crtc) allow again
2285
		 * a NULL crtc->fb.
2286
		 */
2287
		if (intel_crtc->active && crtc->fb)
4104 Serge 2288
			dev_priv->display.update_plane(crtc, crtc->fb,
2289
						       crtc->x, crtc->y);
2290
		mutex_unlock(&crtc->mutex);
2291
	}
2292
}
2293
 
3031 serge 2294
static int
2295
intel_finish_fb(struct drm_framebuffer *old_fb)
2296
{
2297
	struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
2298
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2299
	bool was_interruptible = dev_priv->mm.interruptible;
2327 Serge 2300
	int ret;
2301
 
3031 serge 2302
	/* Big Hammer, we also need to ensure that any pending
2303
	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2304
	 * current scanout is retired before unpinning the old
2305
	 * framebuffer.
2306
	 *
2307
	 * This should only fail upon a hung GPU, in which case we
2308
	 * can safely continue.
2309
	 */
2310
	dev_priv->mm.interruptible = false;
2311
	ret = i915_gem_object_finish_gpu(obj);
2312
	dev_priv->mm.interruptible = was_interruptible;
2327 Serge 2313
 
3031 serge 2314
	return ret;
2327 Serge 2315
}
4104 Serge 2316
 
2317
static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
2318
{
2319
	struct drm_device *dev = crtc->dev;
2320
	struct drm_i915_master_private *master_priv;
2321
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2322
 
2323
	if (!dev->primary->master)
2324
		return;
2325
 
2326
	master_priv = dev->primary->master->driver_priv;
2327
	if (!master_priv->sarea_priv)
2328
		return;
2329
 
2330
	switch (intel_crtc->pipe) {
2331
	case 0:
2332
		master_priv->sarea_priv->pipeA_x = x;
2333
		master_priv->sarea_priv->pipeA_y = y;
2334
		break;
2335
	case 1:
2336
		master_priv->sarea_priv->pipeB_x = x;
2337
		master_priv->sarea_priv->pipeB_y = y;
2338
		break;
2339
	default:
2340
		break;
2341
	}
2342
}
3031 serge 2343
#endif
2327 Serge 2344
 
2345
static int
2346
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
3031 serge 2347
		    struct drm_framebuffer *fb)
2327 Serge 2348
{
2349
	struct drm_device *dev = crtc->dev;
3031 serge 2350
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 2351
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031 serge 2352
	struct drm_framebuffer *old_fb;
2342 Serge 2353
	int ret;
2327 Serge 2354
 
2355
	/* no fb bound */
3031 serge 2356
	if (!fb) {
2327 Serge 2357
		DRM_ERROR("No FB bound\n");
2358
		return 0;
2359
	}
2360
 
3746 Serge 2361
	if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
4104 Serge 2362
		DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
2363
			  plane_name(intel_crtc->plane),
3746 Serge 2364
				INTEL_INFO(dev)->num_pipes);
2327 Serge 2365
		return -EINVAL;
2366
	}
2367
 
2368
	mutex_lock(&dev->struct_mutex);
4280 Serge 2369
    ret = intel_pin_and_fence_fb_obj(dev,
2370
                    to_intel_framebuffer(fb)->obj,
2371
                    NULL);
2372
    if (ret != 0) {
2373
       mutex_unlock(&dev->struct_mutex);
2374
       DRM_ERROR("pin & fence failed\n");
2375
       return ret;
2376
    }
2327 Serge 2377
 
4560 Serge 2378
	/*
2379
	 * Update pipe size and adjust fitter if needed: the reason for this is
2380
	 * that in compute_mode_changes we check the native mode (not the pfit
2381
	 * mode) to see if we can flip rather than do a full mode set. In the
2382
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
2383
	 * pfit state, we'll end up with a big fb scanned out into the wrong
2384
	 * sized surface.
2385
	 *
2386
	 * To fix this properly, we need to hoist the checks up into
2387
	 * compute_mode_changes (or above), check the actual pfit state and
2388
	 * whether the platform allows pfit disable with pipe active, and only
2389
	 * then update the pipesrc and pfit state, even on the flip path.
2390
	 */
4280 Serge 2391
	if (i915_fastboot) {
4560 Serge 2392
		const struct drm_display_mode *adjusted_mode =
2393
			&intel_crtc->config.adjusted_mode;
2394
 
4280 Serge 2395
		I915_WRITE(PIPESRC(intel_crtc->pipe),
4560 Serge 2396
			   ((adjusted_mode->crtc_hdisplay - 1) << 16) |
2397
			   (adjusted_mode->crtc_vdisplay - 1));
4280 Serge 2398
		if (!intel_crtc->config.pch_pfit.enabled &&
2399
		    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
2400
		     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2401
			I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
2402
			I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
2403
			I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
2404
		}
4560 Serge 2405
		intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
2406
		intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
4280 Serge 2407
	}
3031 serge 2408
 
2409
	ret = dev_priv->display.update_plane(crtc, fb, x, y);
2327 Serge 2410
	if (ret) {
3031 serge 2411
		intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
2327 Serge 2412
		mutex_unlock(&dev->struct_mutex);
2413
		DRM_ERROR("failed to update base address\n");
3243 Serge 2414
        return ret;
2327 Serge 2415
	}
2416
 
3031 serge 2417
	old_fb = crtc->fb;
2418
	crtc->fb = fb;
2419
	crtc->x = x;
2420
	crtc->y = y;
2421
 
2422
	if (old_fb) {
4104 Serge 2423
		if (intel_crtc->active && old_fb != fb)
3031 serge 2424
		intel_wait_for_vblank(dev, intel_crtc->pipe);
2425
		intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
2426
	}
2427
 
2428
	intel_update_fbc(dev);
4104 Serge 2429
	intel_edp_psr_update(dev);
2336 Serge 2430
	mutex_unlock(&dev->struct_mutex);
2327 Serge 2431
 
2336 Serge 2432
    return 0;
2327 Serge 2433
}
2434
 
2435
static void intel_fdi_normal_train(struct drm_crtc *crtc)
2436
{
2437
	struct drm_device *dev = crtc->dev;
2438
	struct drm_i915_private *dev_priv = dev->dev_private;
2439
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2440
	int pipe = intel_crtc->pipe;
2441
	u32 reg, temp;
2442
 
2443
	/* enable normal train */
2444
	reg = FDI_TX_CTL(pipe);
2445
	temp = I915_READ(reg);
2446
	if (IS_IVYBRIDGE(dev)) {
2447
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2448
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
2449
	} else {
2450
		temp &= ~FDI_LINK_TRAIN_NONE;
2451
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
2452
	}
2453
	I915_WRITE(reg, temp);
2454
 
2455
	reg = FDI_RX_CTL(pipe);
2456
	temp = I915_READ(reg);
2457
	if (HAS_PCH_CPT(dev)) {
2458
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2459
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2460
	} else {
2461
		temp &= ~FDI_LINK_TRAIN_NONE;
2462
		temp |= FDI_LINK_TRAIN_NONE;
2463
	}
2464
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2465
 
2466
	/* wait one idle pattern time */
2467
	POSTING_READ(reg);
2468
	udelay(1000);
2469
 
2470
	/* IVB wants error correction enabled */
2471
	if (IS_IVYBRIDGE(dev))
2472
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2473
			   FDI_FE_ERRC_ENABLE);
2474
}
2475
 
4280 Serge 2476
static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
4104 Serge 2477
{
4280 Serge 2478
	return crtc->base.enabled && crtc->active &&
2479
		crtc->config.has_pch_encoder;
4104 Serge 2480
}
2481
 
3243 Serge 2482
static void ivb_modeset_global_resources(struct drm_device *dev)
2327 Serge 2483
{
2484
	struct drm_i915_private *dev_priv = dev->dev_private;
3243 Serge 2485
	struct intel_crtc *pipe_B_crtc =
2486
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
2487
	struct intel_crtc *pipe_C_crtc =
2488
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
2489
	uint32_t temp;
2327 Serge 2490
 
4104 Serge 2491
	/*
2492
	 * When everything is off disable fdi C so that we could enable fdi B
2493
	 * with all lanes. Note that we don't care about enabled pipes without
2494
	 * an enabled pch encoder.
2495
	 */
2496
	if (!pipe_has_enabled_pch(pipe_B_crtc) &&
2497
	    !pipe_has_enabled_pch(pipe_C_crtc)) {
3243 Serge 2498
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
2499
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
2500
 
2501
		temp = I915_READ(SOUTH_CHICKEN1);
2502
		temp &= ~FDI_BC_BIFURCATION_SELECT;
2503
		DRM_DEBUG_KMS("disabling fdi C rx\n");
2504
		I915_WRITE(SOUTH_CHICKEN1, temp);
2505
	}
2327 Serge 2506
}
2507
 
2508
/* The FDI link training functions for ILK/Ibexpeak. */
2509
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2510
{
2511
    struct drm_device *dev = crtc->dev;
2512
    struct drm_i915_private *dev_priv = dev->dev_private;
2513
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2514
    int pipe = intel_crtc->pipe;
2515
    int plane = intel_crtc->plane;
2516
    u32 reg, temp, tries;
2517
 
2518
    /* FDI needs bits from pipe & plane first */
2519
    assert_pipe_enabled(dev_priv, pipe);
2520
    assert_plane_enabled(dev_priv, plane);
2521
 
2522
    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
2523
       for train result */
2524
    reg = FDI_RX_IMR(pipe);
2525
    temp = I915_READ(reg);
2526
    temp &= ~FDI_RX_SYMBOL_LOCK;
2527
    temp &= ~FDI_RX_BIT_LOCK;
2528
    I915_WRITE(reg, temp);
2529
    I915_READ(reg);
2530
    udelay(150);
2531
 
2532
    /* enable CPU FDI TX and PCH FDI RX */
2533
    reg = FDI_TX_CTL(pipe);
2534
    temp = I915_READ(reg);
4104 Serge 2535
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
2536
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2327 Serge 2537
    temp &= ~FDI_LINK_TRAIN_NONE;
2538
    temp |= FDI_LINK_TRAIN_PATTERN_1;
2539
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
2540
 
2541
    reg = FDI_RX_CTL(pipe);
2542
    temp = I915_READ(reg);
2543
    temp &= ~FDI_LINK_TRAIN_NONE;
2544
    temp |= FDI_LINK_TRAIN_PATTERN_1;
2545
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
2546
 
2547
    POSTING_READ(reg);
2548
    udelay(150);
2549
 
2550
    /* Ironlake workaround, enable clock pointer after FDI enable*/
2551
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2552
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2553
               FDI_RX_PHASE_SYNC_POINTER_EN);
2554
 
2555
    reg = FDI_RX_IIR(pipe);
2556
    for (tries = 0; tries < 5; tries++) {
2557
        temp = I915_READ(reg);
2558
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2559
 
2560
        if ((temp & FDI_RX_BIT_LOCK)) {
2561
            DRM_DEBUG_KMS("FDI train 1 done.\n");
2562
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2563
            break;
2564
        }
2565
    }
2566
    if (tries == 5)
2567
        DRM_ERROR("FDI train 1 fail!\n");
2568
 
2569
    /* Train 2 */
2570
    reg = FDI_TX_CTL(pipe);
2571
    temp = I915_READ(reg);
2572
    temp &= ~FDI_LINK_TRAIN_NONE;
2573
    temp |= FDI_LINK_TRAIN_PATTERN_2;
2574
    I915_WRITE(reg, temp);
2575
 
2576
    reg = FDI_RX_CTL(pipe);
2577
    temp = I915_READ(reg);
2578
    temp &= ~FDI_LINK_TRAIN_NONE;
2579
    temp |= FDI_LINK_TRAIN_PATTERN_2;
2580
    I915_WRITE(reg, temp);
2581
 
2582
    POSTING_READ(reg);
2583
    udelay(150);
2584
 
2585
    reg = FDI_RX_IIR(pipe);
2586
    for (tries = 0; tries < 5; tries++) {
2587
        temp = I915_READ(reg);
2588
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2589
 
2590
        if (temp & FDI_RX_SYMBOL_LOCK) {
2591
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2592
            DRM_DEBUG_KMS("FDI train 2 done.\n");
2593
            break;
2594
        }
2595
    }
2596
    if (tries == 5)
2597
        DRM_ERROR("FDI train 2 fail!\n");
2598
 
2599
    DRM_DEBUG_KMS("FDI train done\n");
2600
 
2601
}
2602
 
2342 Serge 2603
static const int snb_b_fdi_train_param[] = {
2327 Serge 2604
    FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2605
    FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2606
    FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2607
    FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2608
};
2609
 
2610
/* The FDI link training functions for SNB/Cougarpoint. */
2611
static void gen6_fdi_link_train(struct drm_crtc *crtc)
2612
{
2613
    struct drm_device *dev = crtc->dev;
2614
    struct drm_i915_private *dev_priv = dev->dev_private;
2615
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2616
    int pipe = intel_crtc->pipe;
3031 serge 2617
	u32 reg, temp, i, retry;
2327 Serge 2618
 
2619
    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
2620
       for train result */
2621
    reg = FDI_RX_IMR(pipe);
2622
    temp = I915_READ(reg);
2623
    temp &= ~FDI_RX_SYMBOL_LOCK;
2624
    temp &= ~FDI_RX_BIT_LOCK;
2625
    I915_WRITE(reg, temp);
2626
 
2627
    POSTING_READ(reg);
2628
    udelay(150);
2629
 
2630
    /* enable CPU FDI TX and PCH FDI RX */
2631
    reg = FDI_TX_CTL(pipe);
2632
    temp = I915_READ(reg);
4104 Serge 2633
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
2634
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2327 Serge 2635
    temp &= ~FDI_LINK_TRAIN_NONE;
2636
    temp |= FDI_LINK_TRAIN_PATTERN_1;
2637
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2638
    /* SNB-B */
2639
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2640
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
2641
 
3243 Serge 2642
	I915_WRITE(FDI_RX_MISC(pipe),
2643
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
2644
 
2327 Serge 2645
    reg = FDI_RX_CTL(pipe);
2646
    temp = I915_READ(reg);
2647
    if (HAS_PCH_CPT(dev)) {
2648
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2649
        temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2650
    } else {
2651
        temp &= ~FDI_LINK_TRAIN_NONE;
2652
        temp |= FDI_LINK_TRAIN_PATTERN_1;
2653
    }
2654
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
2655
 
2656
    POSTING_READ(reg);
2657
    udelay(150);
2658
 
2342 Serge 2659
	for (i = 0; i < 4; i++) {
2327 Serge 2660
        reg = FDI_TX_CTL(pipe);
2661
        temp = I915_READ(reg);
2662
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2663
        temp |= snb_b_fdi_train_param[i];
2664
        I915_WRITE(reg, temp);
2665
 
2666
        POSTING_READ(reg);
2667
        udelay(500);
2668
 
3031 serge 2669
		for (retry = 0; retry < 5; retry++) {
2327 Serge 2670
        reg = FDI_RX_IIR(pipe);
2671
        temp = I915_READ(reg);
2672
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2673
        if (temp & FDI_RX_BIT_LOCK) {
2674
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2675
            DRM_DEBUG_KMS("FDI train 1 done.\n");
2676
            break;
2677
        }
3031 serge 2678
			udelay(50);
2679
		}
2680
		if (retry < 5)
2681
			break;
2327 Serge 2682
    }
2683
    if (i == 4)
2684
        DRM_ERROR("FDI train 1 fail!\n");
2685
 
2686
    /* Train 2 */
2687
    reg = FDI_TX_CTL(pipe);
2688
    temp = I915_READ(reg);
2689
    temp &= ~FDI_LINK_TRAIN_NONE;
2690
    temp |= FDI_LINK_TRAIN_PATTERN_2;
2691
    if (IS_GEN6(dev)) {
2692
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2693
        /* SNB-B */
2694
        temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2695
    }
2696
    I915_WRITE(reg, temp);
2697
 
2698
    reg = FDI_RX_CTL(pipe);
2699
    temp = I915_READ(reg);
2700
    if (HAS_PCH_CPT(dev)) {
2701
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2702
        temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2703
    } else {
2704
        temp &= ~FDI_LINK_TRAIN_NONE;
2705
        temp |= FDI_LINK_TRAIN_PATTERN_2;
2706
    }
2707
    I915_WRITE(reg, temp);
2708
 
2709
    POSTING_READ(reg);
2710
    udelay(150);
2711
 
2342 Serge 2712
	for (i = 0; i < 4; i++) {
2327 Serge 2713
        reg = FDI_TX_CTL(pipe);
2714
        temp = I915_READ(reg);
2715
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2716
        temp |= snb_b_fdi_train_param[i];
2717
        I915_WRITE(reg, temp);
2718
 
2719
        POSTING_READ(reg);
2720
        udelay(500);
2721
 
3031 serge 2722
		for (retry = 0; retry < 5; retry++) {
2327 Serge 2723
        reg = FDI_RX_IIR(pipe);
2724
        temp = I915_READ(reg);
2725
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2726
        if (temp & FDI_RX_SYMBOL_LOCK) {
2727
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2728
            DRM_DEBUG_KMS("FDI train 2 done.\n");
2729
            break;
2730
        }
3031 serge 2731
			udelay(50);
2732
		}
2733
		if (retry < 5)
2734
			break;
2327 Serge 2735
    }
2736
    if (i == 4)
2737
        DRM_ERROR("FDI train 2 fail!\n");
2738
 
2739
    DRM_DEBUG_KMS("FDI train done.\n");
2740
}
2741
 
2742
/* Manual link training for Ivy Bridge A0 parts */
2743
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i, j;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* j/2: each vswing/preemphasis entry is attempted twice */
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		/* Poll the RX IIR up to 4 times for bit lock (train 1). */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			/* Re-read once in case the bit latched between reads. */
			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		/* Poll up to 4 times for symbol lock (train 2). */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
2860
 
3031 serge 2861
/* Enable the FDI RX and TX PLLs for this pipe; must run before FDI link
 * training. The udelay()s cover PLL warmup plus DMI latency. */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	u32 reg, temp;


	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
	/* Mirror the pipe's BPC setting into FDI RX (bits 18:16 <- PIPECONF) */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
2897
 
3031 serge 2898
/* Reverse of ironlake_fdi_pll_enable(): switch RX back to the raw clock and
 * turn off the TX and RX FDI PLLs for this pipe. */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
2926
 
2327 Serge 2927
/* Disable the CPU FDI transmitter and PCH FDI receiver for this crtc, and
 * leave both sides parked in training pattern 1 as the modeset sequence
 * requires. */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev)) {
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	}

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		/* CPT PCH uses its own pattern field layout */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
2979
 
3031 serge 2980
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2327 Serge 2981
{
3031 serge 2982
	struct drm_device *dev = crtc->dev;
2327 Serge 2983
	struct drm_i915_private *dev_priv = dev->dev_private;
3480 Serge 2984
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031 serge 2985
	unsigned long flags;
2986
	bool pending;
2327 Serge 2987
 
3480 Serge 2988
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
2989
	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
3031 serge 2990
		return false;
2327 Serge 2991
 
3031 serge 2992
	spin_lock_irqsave(&dev->event_lock, flags);
2993
	pending = to_intel_crtc(crtc)->unpin_work != NULL;
2994
	spin_unlock_irqrestore(&dev->event_lock, flags);
2995
 
2996
	return pending;
2327 Serge 2997
}
2998
 
3031 serge 2999
#if 0
2327 Serge 3000
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3001
{
3031 serge 3002
	struct drm_device *dev = crtc->dev;
3003
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 3004
 
3005
	if (crtc->fb == NULL)
3006
		return;
3007
 
3480 Serge 3008
	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
3009
 
2360 Serge 3010
	wait_event(dev_priv->pending_flip_queue,
3031 serge 3011
		   !intel_crtc_has_pending_flip(crtc));
3012
 
3013
	mutex_lock(&dev->struct_mutex);
3014
	intel_finish_fb(crtc->fb);
3015
	mutex_unlock(&dev->struct_mutex);
2327 Serge 3016
}
3031 serge 3017
#endif
2327 Serge 3018
 
3031 serge 3019
/* Program iCLKIP clock to the desired frequency */
3020
/* Program iCLKIP clock to the desired frequency (LPT PCH). Computes the
 * divisor/phase-increment pair for the crtc's pixel clock and writes it
 * through the sideband interface (SBI), gating the pixel clock around the
 * update as the hardware requires. */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock; /* in kHz */
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	mutex_lock(&dev_priv->dpio_lock);

	/* It is necessary to ungate the pixclk gate prior to programming
	 * the divisors, and gate it back when it is done.
	 */
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	/* Disable SSCCTL */
	intel_sbi_write(dev_priv, SBI_SSCCTL6,
			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
				SBI_SSCCTL_DISABLE,
			SBI_ICLK);

	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
	if (clock == 20000) {
		auxdiv = 1;
		divsel = 0x41;
		phaseinc = 0x20;
	} else {
		/* The iCLK virtual clock root frequency is in MHz,
		 * but the adjusted_mode->crtc_clock in in KHz. To get the
		 * divisors, it is necessary to divide one by another, so we
		 * convert the virtual clock precision to KHz here for higher
		 * precision.
		 */
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor, msb_divisor_value, pi_value;

		desired_divisor = (iclk_virtual_root_freq / clock);
		msb_divisor_value = desired_divisor / iclk_pi_range;
		pi_value = desired_divisor % iclk_pi_range;

		auxdiv = 0;
		divsel = msb_divisor_value - 2;
		phaseinc = pi_value;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);

	mutex_unlock(&dev_priv->dpio_lock);
}
3107
 
4104 Serge 3108
/* Copy the CPU transcoder's H/V timing registers into the matching PCH
 * transcoder so both sides of the FDI link agree on the mode. */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
3131
 
4280 Serge 3132
/* Split the shared FDI B/C lanes (CPT PCH) so pipes B and C each get two
 * lanes. No-op if bifurcation is already enabled; both FDI receivers must be
 * disabled before flipping the chicken bit. */
static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (temp & FDI_BC_BIFURCATION_SELECT)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp |= FDI_BC_BIFURCATION_SELECT;
	DRM_DEBUG_KMS("enabling fdi C rx\n");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}
3149
 
3150
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
3151
{
3152
	struct drm_device *dev = intel_crtc->base.dev;
3153
	struct drm_i915_private *dev_priv = dev->dev_private;
3154
 
3155
	switch (intel_crtc->pipe) {
3156
	case PIPE_A:
3157
		break;
3158
	case PIPE_B:
3159
		if (intel_crtc->config.fdi_lanes > 2)
3160
			WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
3161
		else
3162
			cpt_enable_fdi_bc_bifurcation(dev);
3163
 
3164
		break;
3165
	case PIPE_C:
3166
		cpt_enable_fdi_bc_bifurcation(dev);
3167
 
3168
		break;
3169
	default:
3170
		BUG();
3171
	}
3172
}
3173
 
2327 Serge 3174
/*
3175
 * Enable PCH resources required for PCH ports:
3176
 *   - PCH PLLs
3177
 *   - FDI training & RX/TX
3178
 *   - update transcoder timings
3179
 *   - DP transcoding bits
3180
 *   - transcoder
3181
 */
3182
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev))
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	/* We need to program the right clock selection before writing the pixel
	 * mutliplier into the DPLL. */
	if (HAS_PCH_CPT(dev)) {
		u32 sel;

		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (intel_crtc->config.shared_dpll == DPLL_ID_PCH_PLL_B)
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	ironlake_enable_shared_dpll(intel_crtc);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) &&
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= (TRANS_DP_OUTPUT_ENABLE |
			 TRANS_DP_ENH_FRAMING);
		temp |= bpc << 9; /* same format but at 11:9 */

		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* Route the transcoder to the DP port driving this crtc. */
		switch (intel_trans_dp_port_sel(crtc)) {
		case PCH_DP_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			BUG();
		}

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(dev_priv, pipe);
}
3271
 
3243 Serge 3272
/* LPT variant of the PCH enable sequence: program the iCLKIP clock, copy the
 * CPU timings into the (single, pipe-A) PCH transcoder and enable it. */
static void lpt_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);

	lpt_program_iclkip(crtc);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
3288
 
4104 Serge 3289
/* Drop this crtc's reference on its shared DPLL and detach the crtc from it.
 * Warns on refcount underflow and on releasing a PLL that is still on. */
static void intel_put_shared_dpll(struct intel_crtc *crtc)
{
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	if (pll == NULL)
		return;

	if (pll->refcount == 0) {
		WARN(1, "bad %s refcount\n", pll->name);
		return;
	}

	if (--pll->refcount == 0) {
		/* Last user gone: the PLL must already be off and inactive. */
		WARN_ON(pll->on);
		WARN_ON(pll->active);
	}

	crtc->config.shared_dpll = DPLL_ID_PRIVATE;
}
3308
 
4104 Serge 3309
static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
3031 serge 3310
{
4104 Serge 3311
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
3312
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3313
	enum intel_dpll_id i;
3031 serge 3314
 
3315
	if (pll) {
4104 Serge 3316
		DRM_DEBUG_KMS("CRTC:%d dropping existing %s\n",
3317
			      crtc->base.base.id, pll->name);
3318
		intel_put_shared_dpll(crtc);
3031 serge 3319
	}
3320
 
3321
	if (HAS_PCH_IBX(dev_priv->dev)) {
3322
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
4104 Serge 3323
		i = (enum intel_dpll_id) crtc->pipe;
3324
		pll = &dev_priv->shared_dplls[i];
3031 serge 3325
 
4104 Serge 3326
		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
3327
			      crtc->base.base.id, pll->name);
3031 serge 3328
 
3329
		goto found;
3330
	}
3331
 
4104 Serge 3332
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3333
		pll = &dev_priv->shared_dplls[i];
3031 serge 3334
 
3335
		/* Only want to check enabled timings first */
3336
		if (pll->refcount == 0)
3337
			continue;
3338
 
4104 Serge 3339
		if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
3340
			   sizeof(pll->hw_state)) == 0) {
3341
			DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, ative %d)\n",
3342
				      crtc->base.base.id,
3343
				      pll->name, pll->refcount, pll->active);
3031 serge 3344
 
3345
			goto found;
3346
		}
3347
	}
3348
 
3349
	/* Ok no matching timings, maybe there's a free one? */
4104 Serge 3350
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3351
		pll = &dev_priv->shared_dplls[i];
3031 serge 3352
		if (pll->refcount == 0) {
4104 Serge 3353
			DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
3354
				      crtc->base.base.id, pll->name);
3031 serge 3355
			goto found;
3356
		}
3357
	}
3358
 
3359
	return NULL;
3360
 
3361
found:
4104 Serge 3362
	crtc->config.shared_dpll = i;
3363
	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
3364
			 pipe_name(crtc->pipe));
3365
 
3366
	if (pll->active == 0) {
3367
		memcpy(&pll->hw_state, &crtc->config.dpll_hw_state,
3368
		       sizeof(pll->hw_state));
3369
 
3370
		DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
3371
		WARN_ON(pll->on);
3372
		assert_shared_dpll_disabled(dev_priv, pll);
3373
 
3374
		pll->mode_set(dev_priv, pll);
3375
	}
3031 serge 3376
	pll->refcount++;
3377
 
3378
	return pll;
3379
}
3380
 
4104 Serge 3381
/* Sanity-check that the pipe's scanline counter is advancing after a CPT
 * modeset; a stuck PIPEDSL value means the pipe failed to start. */
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	/* Retry once before declaring the pipe stuck. */
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}
3394
 
4104 Serge 3395
/* Enable the PCH panel fitter for this crtc if the computed config uses it,
 * programming the window position and size from pch_pfit state. */
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	if (crtc->config.pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
						 PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size);
	}
}
3415
 
3416
static void intel_enable_planes(struct drm_crtc *crtc)
3417
{
3418
	struct drm_device *dev = crtc->dev;
3419
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3420
	struct intel_plane *intel_plane;
3421
 
3422
	list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head)
3423
		if (intel_plane->pipe == pipe)
3424
			intel_plane_restore(&intel_plane->base);
3425
}
3426
 
3427
static void intel_disable_planes(struct drm_crtc *crtc)
3428
{
3429
	struct drm_device *dev = crtc->dev;
3430
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3431
	struct intel_plane *intel_plane;
3432
 
3433
	list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head)
3434
		if (intel_plane->pipe == pipe)
3435
			intel_plane_disable(&intel_plane->base);
3436
}
3437
 
4560 Serge 3438
/* Turn on Intermediate Pixel Storage (IPS) for this crtc, if the computed
 * config enabled it. Must be called after the plane is enabled and a vblank
 * has passed. On Broadwell the enable goes through the pcode mailbox; on
 * Haswell it is a direct IPS_CTL write. */
void hsw_enable_ips(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

	if (!crtc->config.ips_enabled)
		return;

	/* We can only enable IPS after we enable a plane and wait for a vblank.
	 * We guarantee that the plane is enabled by calling intel_enable_ips
	 * only after intel_enable_plane. And intel_enable_plane already waits
	 * for a vblank, so all we need to do here is to enable the IPS bit. */
	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(crtc->base.dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}
3470
 
3471
/* Turn off IPS for this crtc (mailbox on Broadwell, IPS_CTL write on
 * Haswell), then wait one vblank as required before the plane may be
 * disabled. */
void hsw_disable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config.ips_enabled)
		return;

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(crtc->base.dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev, crtc->pipe);
}
3492
 
3493
/** Loads the palette/gamma unit for the CRTC with the prepared values */
3494
/** Loads the palette/gamma unit for the CRTC with the prepared values */
static void intel_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	int palreg = PALETTE(pipe);
	int i;
	bool reenable_ips = false;

	/* The clocks have to be on to load the palette. */
	if (!crtc->enabled || !intel_crtc->active)
		return;

	if (!HAS_PCH_SPLIT(dev_priv->dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	}

	/* use legacy palette for Ironlake */
	if (HAS_PCH_SPLIT(dev))
		palreg = LGC_PALETTE(pipe);

	/* Workaround : Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 */
	if (IS_HASWELL(dev) && intel_crtc->config.ips_enabled &&
	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
	     GAMMA_MODE_MODE_SPLIT)) {
		hsw_disable_ips(intel_crtc);
		reenable_ips = true;
	}

	/* 256 LUT entries, packed as 8-bit R/G/B in one 32-bit register each */
	for (i = 0; i < 256; i++) {
		I915_WRITE(palreg + 4 * i,
			   (intel_crtc->lut_r[i] << 16) |
			   (intel_crtc->lut_g[i] << 8) |
			   intel_crtc->lut_b[i]);
	}

	if (reenable_ips)
		hsw_enable_ips(intel_crtc);
}
3539
 
2327 Serge 3540
/* Full crtc enable sequence for Ironlake-class hardware: FDI PLL, panel
 * fitter, palette, pipe, planes, cursor and (if present) the PCH side, in
 * the order the modeset sequence mandates. */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	WARN_ON(!crtc->enabled);

	if (intel_crtc->active)
		return;

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	if (intel_crtc->config.has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(dev_priv, pipe,
			  intel_crtc->config.has_pch_encoder, false);
	intel_enable_primary_plane(dev_priv, plane, pipe);
	intel_enable_planes(crtc);
	intel_crtc_update_cursor(crtc, true);

	if (intel_crtc->config.has_pch_encoder)
		ironlake_pch_enable(crtc);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	if (HAS_PCH_CPT(dev))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	/*
	 * There seems to be a race in PCH platform hw (at least on some
	 * outputs) where an enabled pipe still completes any pageflip right
	 * away (as if the pipe is off) instead of waiting for vblank. As soon
	 * as the first vblank happend, everything works as expected. Hence just
	 * wait for one vblank before returning to avoid strange things
	 * happening.
	 */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
}
3611
 
4104 Serge 3612
/* IPS only exists on ULT machines and is tied to pipe A. */
3613
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
3614
{
3615
	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
3616
}
3617
 
4560 Serge 3618
static void haswell_crtc_enable_planes(struct drm_crtc *crtc)
4104 Serge 3619
{
4560 Serge 3620
	struct drm_device *dev = crtc->dev;
3621
	struct drm_i915_private *dev_priv = dev->dev_private;
3622
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3623
	int pipe = intel_crtc->pipe;
3624
	int plane = intel_crtc->plane;
4104 Serge 3625
 
4560 Serge 3626
	intel_enable_primary_plane(dev_priv, plane, pipe);
3627
	intel_enable_planes(crtc);
3628
	intel_crtc_update_cursor(crtc, true);
4104 Serge 3629
 
4560 Serge 3630
	hsw_enable_ips(intel_crtc);
3631
 
3632
	mutex_lock(&dev->struct_mutex);
3633
	intel_update_fbc(dev);
3634
	mutex_unlock(&dev->struct_mutex);
4104 Serge 3635
}
3636
 
4560 Serge 3637
static void haswell_crtc_disable_planes(struct drm_crtc *crtc)
4104 Serge 3638
{
4560 Serge 3639
	struct drm_device *dev = crtc->dev;
4104 Serge 3640
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 3641
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3642
	int pipe = intel_crtc->pipe;
3643
	int plane = intel_crtc->plane;
4104 Serge 3644
 
4560 Serge 3645
//   intel_crtc_wait_for_pending_flips(crtc);
3646
//   drm_vblank_off(dev, pipe);
3647
 
3648
	/* FBC must be disabled before disabling the plane on HSW. */
3649
	if (dev_priv->fbc.plane == plane)
3650
		intel_disable_fbc(dev);
3651
 
3652
	hsw_disable_ips(intel_crtc);
3653
 
3654
	intel_crtc_update_cursor(crtc, false);
3655
	intel_disable_planes(crtc);
3656
	intel_disable_primary_plane(dev_priv, plane, pipe);
3657
}
3658
 
3659
/*
3660
 * This implements the workaround described in the "notes" section of the mode
3661
 * set sequence documentation. When going from no pipes or single pipe to
3662
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
3663
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
3664
 */
3665
static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
3666
{
3667
	struct drm_device *dev = crtc->base.dev;
3668
	struct intel_crtc *crtc_it, *other_active_crtc = NULL;
3669
 
3670
	/* We want to get the other_active_crtc only if there's only 1 other
3671
	 * active crtc. */
3672
	list_for_each_entry(crtc_it, &dev->mode_config.crtc_list, base.head) {
3673
		if (!crtc_it->active || crtc_it == crtc)
3674
			continue;
3675
 
3676
		if (other_active_crtc)
4104 Serge 3677
		return;
3678
 
4560 Serge 3679
		other_active_crtc = crtc_it;
3680
	}
3681
	if (!other_active_crtc)
3682
		return;
4104 Serge 3683
 
4560 Serge 3684
	intel_wait_for_vblank(dev, other_active_crtc->pipe);
3685
	intel_wait_for_vblank(dev, other_active_crtc->pipe);
4104 Serge 3686
}
3687
 
3243 Serge 3688
static void haswell_crtc_enable(struct drm_crtc *crtc)
3689
{
3690
	struct drm_device *dev = crtc->dev;
3691
	struct drm_i915_private *dev_priv = dev->dev_private;
3692
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3693
	struct intel_encoder *encoder;
3694
	int pipe = intel_crtc->pipe;
3695
 
3696
	WARN_ON(!crtc->enabled);
3697
 
3698
	if (intel_crtc->active)
3699
		return;
3700
 
3701
	intel_crtc->active = true;
4104 Serge 3702
 
3703
	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
3704
	if (intel_crtc->config.has_pch_encoder)
3705
		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
3706
 
3746 Serge 3707
	if (intel_crtc->config.has_pch_encoder)
3243 Serge 3708
		dev_priv->display.fdi_link_train(crtc);
3709
 
3710
	for_each_encoder_on_crtc(dev, crtc, encoder)
3711
		if (encoder->pre_enable)
3712
			encoder->pre_enable(encoder);
3713
 
3714
	intel_ddi_enable_pipe_clock(intel_crtc);
3715
 
4104 Serge 3716
	ironlake_pfit_enable(intel_crtc);
3243 Serge 3717
 
3718
	/*
3719
	 * On ILK+ LUT must be loaded before the pipe is running but with
3720
	 * clocks enabled
3721
	 */
3722
	intel_crtc_load_lut(crtc);
3723
 
3724
	intel_ddi_set_pipe_settings(crtc);
3746 Serge 3725
	intel_ddi_enable_transcoder_func(crtc);
3243 Serge 3726
 
4560 Serge 3727
	intel_update_watermarks(crtc);
3746 Serge 3728
	intel_enable_pipe(dev_priv, pipe,
4560 Serge 3729
			  intel_crtc->config.has_pch_encoder, false);
3243 Serge 3730
 
3746 Serge 3731
	if (intel_crtc->config.has_pch_encoder)
3243 Serge 3732
		lpt_pch_enable(crtc);
3733
 
4560 Serge 3734
	for_each_encoder_on_crtc(dev, crtc, encoder) {
3243 Serge 3735
		encoder->enable(encoder);
4560 Serge 3736
		intel_opregion_notify_encoder(encoder, true);
3737
	}
3243 Serge 3738
 
4560 Serge 3739
	/* If we change the relative order between pipe/planes enabling, we need
3740
	 * to change the workaround. */
3741
	haswell_mode_set_planes_workaround(intel_crtc);
3742
	haswell_crtc_enable_planes(crtc);
3743
 
3243 Serge 3744
	/*
3745
	 * There seems to be a race in PCH platform hw (at least on some
3746
	 * outputs) where an enabled pipe still completes any pageflip right
3747
	 * away (as if the pipe is off) instead of waiting for vblank. As soon
3748
	 * as the first vblank happend, everything works as expected. Hence just
3749
	 * wait for one vblank before returning to avoid strange things
3750
	 * happening.
3751
	 */
3752
	intel_wait_for_vblank(dev, intel_crtc->pipe);
3753
}
3754
 
4104 Serge 3755
static void ironlake_pfit_disable(struct intel_crtc *crtc)
3756
{
3757
	struct drm_device *dev = crtc->base.dev;
3758
	struct drm_i915_private *dev_priv = dev->dev_private;
3759
	int pipe = crtc->pipe;
3760
 
3761
	/* To avoid upsetting the power well on haswell only disable the pfit if
3762
	 * it's in use. The hw state code will make sure we get this right. */
3763
	if (crtc->config.pch_pfit.enabled) {
3764
		I915_WRITE(PF_CTL(pipe), 0);
3765
		I915_WRITE(PF_WIN_POS(pipe), 0);
3766
		I915_WRITE(PF_WIN_SZ(pipe), 0);
3767
	}
3768
}
3769
 
2327 Serge 3770
static void ironlake_crtc_disable(struct drm_crtc *crtc)
3771
{
3772
    struct drm_device *dev = crtc->dev;
3773
    struct drm_i915_private *dev_priv = dev->dev_private;
3774
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031 serge 3775
	struct intel_encoder *encoder;
2327 Serge 3776
    int pipe = intel_crtc->pipe;
3777
    int plane = intel_crtc->plane;
3778
    u32 reg, temp;
3779
 
3031 serge 3780
 
2327 Serge 3781
    if (!intel_crtc->active)
3782
        return;
3783
 
3031 serge 3784
	for_each_encoder_on_crtc(dev, crtc, encoder)
3785
		encoder->disable(encoder);
2336 Serge 3786
 
3031 serge 3787
//    intel_crtc_wait_for_pending_flips(crtc);
2327 Serge 3788
//    drm_vblank_off(dev, pipe);
3789
 
4104 Serge 3790
	if (dev_priv->fbc.plane == plane)
3791
		intel_disable_fbc(dev);
3792
 
4557 Serge 3793
	intel_crtc_update_cursor(crtc, false);
4104 Serge 3794
	intel_disable_planes(crtc);
4560 Serge 3795
	intel_disable_primary_plane(dev_priv, plane, pipe);
2327 Serge 3796
 
4104 Serge 3797
	if (intel_crtc->config.has_pch_encoder)
3798
		intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
2327 Serge 3799
 
3800
    intel_disable_pipe(dev_priv, pipe);
3801
 
4104 Serge 3802
	ironlake_pfit_disable(intel_crtc);
2327 Serge 3803
 
3031 serge 3804
	for_each_encoder_on_crtc(dev, crtc, encoder)
3805
		if (encoder->post_disable)
3806
			encoder->post_disable(encoder);
3807
 
4104 Serge 3808
	if (intel_crtc->config.has_pch_encoder) {
2327 Serge 3809
    ironlake_fdi_disable(crtc);
3810
 
3243 Serge 3811
	ironlake_disable_pch_transcoder(dev_priv, pipe);
4104 Serge 3812
		intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
2327 Serge 3813
 
3814
    if (HAS_PCH_CPT(dev)) {
3815
        /* disable TRANS_DP_CTL */
3816
        reg = TRANS_DP_CTL(pipe);
3817
        temp = I915_READ(reg);
4104 Serge 3818
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
3819
				  TRANS_DP_PORT_SEL_MASK);
2327 Serge 3820
        temp |= TRANS_DP_PORT_SEL_NONE;
3821
        I915_WRITE(reg, temp);
3822
 
3823
        /* disable DPLL_SEL */
3824
        temp = I915_READ(PCH_DPLL_SEL);
4104 Serge 3825
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
2327 Serge 3826
        I915_WRITE(PCH_DPLL_SEL, temp);
3827
    }
3828
 
3829
    /* disable PCH DPLL */
4104 Serge 3830
		intel_disable_shared_dpll(intel_crtc);
2327 Serge 3831
 
3031 serge 3832
	ironlake_fdi_pll_disable(intel_crtc);
4104 Serge 3833
	}
2327 Serge 3834
 
3835
    intel_crtc->active = false;
4560 Serge 3836
	intel_update_watermarks(crtc);
2327 Serge 3837
 
3838
    mutex_lock(&dev->struct_mutex);
3839
    intel_update_fbc(dev);
3840
    mutex_unlock(&dev->struct_mutex);
3841
}
3842
 
3243 Serge 3843
static void haswell_crtc_disable(struct drm_crtc *crtc)
3844
{
3845
	struct drm_device *dev = crtc->dev;
3846
	struct drm_i915_private *dev_priv = dev->dev_private;
3847
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3848
	struct intel_encoder *encoder;
3849
	int pipe = intel_crtc->pipe;
3746 Serge 3850
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
3243 Serge 3851
 
3852
	if (!intel_crtc->active)
3853
		return;
3854
 
4560 Serge 3855
	haswell_crtc_disable_planes(crtc);
3856
 
3857
	for_each_encoder_on_crtc(dev, crtc, encoder) {
3858
		intel_opregion_notify_encoder(encoder, false);
3243 Serge 3859
		encoder->disable(encoder);
4560 Serge 3860
	}
3243 Serge 3861
 
4104 Serge 3862
	if (intel_crtc->config.has_pch_encoder)
3863
		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
3243 Serge 3864
	intel_disable_pipe(dev_priv, pipe);
3865
 
3866
	intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
3867
 
4104 Serge 3868
	ironlake_pfit_disable(intel_crtc);
3243 Serge 3869
 
3870
	intel_ddi_disable_pipe_clock(intel_crtc);
3871
 
3872
	for_each_encoder_on_crtc(dev, crtc, encoder)
3873
		if (encoder->post_disable)
3874
			encoder->post_disable(encoder);
3875
 
3746 Serge 3876
	if (intel_crtc->config.has_pch_encoder) {
3243 Serge 3877
		lpt_disable_pch_transcoder(dev_priv);
4104 Serge 3878
		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
3243 Serge 3879
		intel_ddi_fdi_disable(crtc);
3880
	}
3881
 
3882
	intel_crtc->active = false;
4560 Serge 3883
	intel_update_watermarks(crtc);
3243 Serge 3884
 
3885
	mutex_lock(&dev->struct_mutex);
3886
	intel_update_fbc(dev);
3887
	mutex_unlock(&dev->struct_mutex);
3888
}
3889
 
3031 serge 3890
/* Release the shared DPLL held by this crtc once it is fully off. */
static void ironlake_crtc_off(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	intel_put_shared_dpll(intel_crtc);
}
3895
 
3243 Serge 3896
/* Drop the DDI PLL reference held by this crtc once it is fully off. */
static void haswell_crtc_off(struct drm_crtc *crtc)
{
	intel_ddi_put_crtc_pll(crtc);
}
3900
 
2327 Serge 3901
/*
 * DPMS hook for the legacy video overlay: on disable, switch the overlay
 * off (switch-off call is stubbed out in this port). Enabling is a no-op;
 * userspace re-enables the overlay itself after a modeset.
 */
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
	if (!enable && intel_crtc->overlay) {
		struct drm_device *dev = intel_crtc->base.dev;
		struct drm_i915_private *dev_priv = dev->dev_private;

		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.interruptible = false;
//       (void) intel_overlay_switch_off(intel_crtc->overlay);
		dev_priv->mm.interruptible = true;
		mutex_unlock(&dev->struct_mutex);
	}

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}
3918
 
3480 Serge 3919
/**
3920
 * i9xx_fixup_plane - ugly workaround for G45 to fire up the hardware
3921
 * cursor plane briefly if not already running after enabling the display
3922
 * plane.
3923
 * This workaround avoids occasional blank screens when self refresh is
3924
 * enabled.
3925
 */
3926
static void
3927
g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
3928
{
3929
	u32 cntl = I915_READ(CURCNTR(pipe));
3930
 
3931
	if ((cntl & CURSOR_MODE) == 0) {
3932
		u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
3933
 
3934
		I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
3935
		I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
3936
		intel_wait_for_vblank(dev_priv->dev, pipe);
3937
		I915_WRITE(CURCNTR(pipe), cntl);
3938
		I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
3939
		I915_WRITE(FW_BLC_SELF, fw_bcl_self);
3940
	}
3941
}
3942
 
4104 Serge 3943
static void i9xx_pfit_enable(struct intel_crtc *crtc)
3944
{
3945
	struct drm_device *dev = crtc->base.dev;
3946
	struct drm_i915_private *dev_priv = dev->dev_private;
3947
	struct intel_crtc_config *pipe_config = &crtc->config;
3948
 
3949
	if (!crtc->config.gmch_pfit.control)
3950
		return;
3951
 
3952
	/*
3953
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
3954
	 * according to register description and PRM.
3955
	 */
3956
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
3957
	assert_pipe_disabled(dev_priv, crtc->pipe);
3958
 
3959
	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
3960
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
3961
 
3962
	/* Border color in case we don't scale up to the full screen. Black by
3963
	 * default, change to something else for debugging. */
3964
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
3965
}
3966
 
4560 Serge 3967
int valleyview_get_vco(struct drm_i915_private *dev_priv)
3968
{
3969
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
3970
 
3971
	/* Obtain SKU information */
3972
	mutex_lock(&dev_priv->dpio_lock);
3973
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
3974
		CCK_FUSE_HPLL_FREQ_MASK;
3975
	mutex_unlock(&dev_priv->dpio_lock);
3976
 
3977
	return vco_freq[hpll_freq];
3978
}
3979
 
3980
/* Adjust CDclk dividers to allow high res or save power if possible */
3981
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
3982
{
3983
	struct drm_i915_private *dev_priv = dev->dev_private;
3984
	u32 val, cmd;
3985
 
3986
	if (cdclk >= 320) /* jump to highest voltage for 400MHz too */
3987
		cmd = 2;
3988
	else if (cdclk == 266)
3989
		cmd = 1;
3990
	else
3991
		cmd = 0;
3992
 
3993
	mutex_lock(&dev_priv->rps.hw_lock);
3994
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
3995
	val &= ~DSPFREQGUAR_MASK;
3996
	val |= (cmd << DSPFREQGUAR_SHIFT);
3997
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
3998
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
3999
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
4000
		     50)) {
4001
		DRM_ERROR("timed out waiting for CDclk change\n");
4002
	}
4003
	mutex_unlock(&dev_priv->rps.hw_lock);
4004
 
4005
	if (cdclk == 400) {
4006
		u32 divider, vco;
4007
 
4008
		vco = valleyview_get_vco(dev_priv);
4009
		divider = ((vco << 1) / cdclk) - 1;
4010
 
4011
		mutex_lock(&dev_priv->dpio_lock);
4012
		/* adjust cdclk divider */
4013
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
4014
		val &= ~0xf;
4015
		val |= divider;
4016
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
4017
		mutex_unlock(&dev_priv->dpio_lock);
4018
	}
4019
 
4020
	mutex_lock(&dev_priv->dpio_lock);
4021
	/* adjust self-refresh exit latency value */
4022
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
4023
	val &= ~0x7f;
4024
 
4025
	/*
4026
	 * For high bandwidth configs, we set a higher latency in the bunit
4027
	 * so that the core display fetch happens in time to avoid underruns.
4028
	 */
4029
	if (cdclk == 400)
4030
		val |= 4500 / 250; /* 4.5 usec */
4031
	else
4032
		val |= 3000 / 250; /* 3.0 usec */
4033
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
4034
	mutex_unlock(&dev_priv->dpio_lock);
4035
 
4036
	/* Since we changed the CDclk, we need to update the GMBUSFREQ too */
4037
	intel_i2c_reset(dev);
4038
}
4039
 
4040
static int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
4041
{
4042
	int cur_cdclk, vco;
4043
	int divider;
4044
 
4045
	vco = valleyview_get_vco(dev_priv);
4046
 
4047
	mutex_lock(&dev_priv->dpio_lock);
4048
	divider = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
4049
	mutex_unlock(&dev_priv->dpio_lock);
4050
 
4051
	divider &= 0xf;
4052
 
4053
	cur_cdclk = (vco << 1) / (divider + 1);
4054
 
4055
	return cur_cdclk;
4056
}
4057
 
4058
/*
 * Pick the smallest CDclk bin (MHz) that leaves ~10% headroom above
 * @max_pixclk (kHz).
 *
 * The original body also read the current CDclk via valleyview_cur_cdclk()
 * and discarded the result — a dead, mutex-protected hardware read; it has
 * been removed. The decision depends only on max_pixclk.
 */
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
				 int max_pixclk)
{
	/*
	 * Really only a few cases to deal with, as only 4 CDclks are supported:
	 *   200MHz
	 *   267MHz
	 *   320MHz
	 *   400MHz
	 * So we check to see whether we're above 90% of the lower bin and
	 * adjust if needed.
	 */
	if (max_pixclk > 288000)
		return 400;
	else if (max_pixclk > 240000)
		return 320;
	else
		return 266;
	/* Looks like the 200MHz CDclk freq doesn't work on some configs */
}
4082
 
4083
static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv,
4084
				 unsigned modeset_pipes,
4085
				 struct intel_crtc_config *pipe_config)
4086
{
4087
	struct drm_device *dev = dev_priv->dev;
4088
	struct intel_crtc *intel_crtc;
4089
	int max_pixclk = 0;
4090
 
4091
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
4092
			    base.head) {
4093
		if (modeset_pipes & (1 << intel_crtc->pipe))
4094
			max_pixclk = max(max_pixclk,
4095
					 pipe_config->adjusted_mode.crtc_clock);
4096
		else if (intel_crtc->base.enabled)
4097
			max_pixclk = max(max_pixclk,
4098
					 intel_crtc->config.adjusted_mode.crtc_clock);
4099
	}
4100
 
4101
	return max_pixclk;
4102
}
4103
 
4104
static void valleyview_modeset_global_pipes(struct drm_device *dev,
4105
					    unsigned *prepare_pipes,
4106
					    unsigned modeset_pipes,
4107
					    struct intel_crtc_config *pipe_config)
4108
{
4109
	struct drm_i915_private *dev_priv = dev->dev_private;
4110
	struct intel_crtc *intel_crtc;
4111
	int max_pixclk = intel_mode_max_pixclk(dev_priv, modeset_pipes,
4112
					       pipe_config);
4113
	int cur_cdclk = valleyview_cur_cdclk(dev_priv);
4114
 
4115
	if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk)
4116
		return;
4117
 
4118
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
4119
			    base.head)
4120
		if (intel_crtc->base.enabled)
4121
			*prepare_pipes |= (1 << intel_crtc->pipe);
4122
}
4123
 
4124
static void valleyview_modeset_global_resources(struct drm_device *dev)
4125
{
4126
	struct drm_i915_private *dev_priv = dev->dev_private;
4127
	int max_pixclk = intel_mode_max_pixclk(dev_priv, 0, NULL);
4128
	int cur_cdclk = valleyview_cur_cdclk(dev_priv);
4129
	int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
4130
 
4131
	if (req_cdclk != cur_cdclk)
4132
		valleyview_set_cdclk(dev, req_cdclk);
4133
}
4134
 
4104 Serge 4135
static void valleyview_crtc_enable(struct drm_crtc *crtc)
4136
{
4137
	struct drm_device *dev = crtc->dev;
4138
	struct drm_i915_private *dev_priv = dev->dev_private;
4139
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4140
	struct intel_encoder *encoder;
4141
	int pipe = intel_crtc->pipe;
4142
	int plane = intel_crtc->plane;
4560 Serge 4143
	bool is_dsi;
4104 Serge 4144
 
4145
	WARN_ON(!crtc->enabled);
4146
 
4147
	if (intel_crtc->active)
4148
		return;
4149
 
4150
	intel_crtc->active = true;
4151
 
4152
	for_each_encoder_on_crtc(dev, crtc, encoder)
4153
		if (encoder->pre_pll_enable)
4154
			encoder->pre_pll_enable(encoder);
4155
 
4560 Serge 4156
	is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
4157
 
4158
	if (!is_dsi)
4104 Serge 4159
	vlv_enable_pll(intel_crtc);
4160
 
4161
	for_each_encoder_on_crtc(dev, crtc, encoder)
4162
		if (encoder->pre_enable)
4163
			encoder->pre_enable(encoder);
4164
 
4165
	i9xx_pfit_enable(intel_crtc);
4166
 
4167
	intel_crtc_load_lut(crtc);
4168
 
4560 Serge 4169
	intel_update_watermarks(crtc);
4170
	intel_enable_pipe(dev_priv, pipe, false, is_dsi);
4171
	intel_enable_primary_plane(dev_priv, plane, pipe);
4104 Serge 4172
	intel_enable_planes(crtc);
4557 Serge 4173
	intel_crtc_update_cursor(crtc, true);
4104 Serge 4174
 
4175
	intel_update_fbc(dev);
4176
 
4177
	for_each_encoder_on_crtc(dev, crtc, encoder)
4178
		encoder->enable(encoder);
4179
}
4180
 
2327 Serge 4181
static void i9xx_crtc_enable(struct drm_crtc *crtc)
4182
{
4183
    struct drm_device *dev = crtc->dev;
4184
    struct drm_i915_private *dev_priv = dev->dev_private;
4185
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031 serge 4186
	struct intel_encoder *encoder;
2327 Serge 4187
    int pipe = intel_crtc->pipe;
4188
    int plane = intel_crtc->plane;
4189
 
3031 serge 4190
	WARN_ON(!crtc->enabled);
4191
 
2327 Serge 4192
    if (intel_crtc->active)
4193
        return;
4194
 
4195
    intel_crtc->active = true;
4196
 
3480 Serge 4197
	for_each_encoder_on_crtc(dev, crtc, encoder)
4198
		if (encoder->pre_enable)
4199
			encoder->pre_enable(encoder);
4200
 
4104 Serge 4201
	i9xx_enable_pll(intel_crtc);
4202
 
4203
	i9xx_pfit_enable(intel_crtc);
4204
 
4205
	intel_crtc_load_lut(crtc);
4206
 
4560 Serge 4207
	intel_update_watermarks(crtc);
4208
	intel_enable_pipe(dev_priv, pipe, false, false);
4209
	intel_enable_primary_plane(dev_priv, plane, pipe);
4104 Serge 4210
	intel_enable_planes(crtc);
4211
	/* The fixup needs to happen before cursor is enabled */
3480 Serge 4212
	if (IS_G4X(dev))
4213
		g4x_fixup_plane(dev_priv, pipe);
4557 Serge 4214
	intel_crtc_update_cursor(crtc, true);
2327 Serge 4215
 
4216
    /* Give the overlay scaler a chance to enable if it's on this pipe */
4217
    intel_crtc_dpms_overlay(intel_crtc, true);
3031 serge 4218
 
4104 Serge 4219
	intel_update_fbc(dev);
4220
 
3031 serge 4221
	for_each_encoder_on_crtc(dev, crtc, encoder)
4222
		encoder->enable(encoder);
2327 Serge 4223
}
4224
 
3746 Serge 4225
static void i9xx_pfit_disable(struct intel_crtc *crtc)
4226
{
4227
	struct drm_device *dev = crtc->base.dev;
4228
	struct drm_i915_private *dev_priv = dev->dev_private;
4229
 
4104 Serge 4230
	if (!crtc->config.gmch_pfit.control)
4231
		return;
4232
 
3746 Serge 4233
	assert_pipe_disabled(dev_priv, crtc->pipe);
4234
 
4104 Serge 4235
	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
4236
			 I915_READ(PFIT_CONTROL));
3746 Serge 4237
		I915_WRITE(PFIT_CONTROL, 0);
4238
}
4239
 
2327 Serge 4240
static void i9xx_crtc_disable(struct drm_crtc *crtc)
4241
{
4242
    struct drm_device *dev = crtc->dev;
4243
    struct drm_i915_private *dev_priv = dev->dev_private;
4244
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031 serge 4245
	struct intel_encoder *encoder;
2327 Serge 4246
    int pipe = intel_crtc->pipe;
4247
    int plane = intel_crtc->plane;
4248
 
4249
    if (!intel_crtc->active)
4250
        return;
4251
 
3031 serge 4252
	for_each_encoder_on_crtc(dev, crtc, encoder)
4253
		encoder->disable(encoder);
4254
 
2327 Serge 4255
    /* Give the overlay scaler a chance to disable if it's on this pipe */
3031 serge 4256
//    intel_crtc_wait_for_pending_flips(crtc);
2327 Serge 4257
//    drm_vblank_off(dev, pipe);
4258
 
4104 Serge 4259
	if (dev_priv->fbc.plane == plane)
2327 Serge 4260
        intel_disable_fbc(dev);
4261
 
4104 Serge 4262
	intel_crtc_dpms_overlay(intel_crtc, false);
4557 Serge 4263
	intel_crtc_update_cursor(crtc, false);
4104 Serge 4264
	intel_disable_planes(crtc);
4560 Serge 4265
	intel_disable_primary_plane(dev_priv, plane, pipe);
4104 Serge 4266
 
2327 Serge 4267
    intel_disable_pipe(dev_priv, pipe);
3480 Serge 4268
 
3746 Serge 4269
	i9xx_pfit_disable(intel_crtc);
3480 Serge 4270
 
4104 Serge 4271
	for_each_encoder_on_crtc(dev, crtc, encoder)
4272
		if (encoder->post_disable)
4273
			encoder->post_disable(encoder);
2327 Serge 4274
 
4560 Serge 4275
	if (IS_VALLEYVIEW(dev) && !intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
4557 Serge 4276
		vlv_disable_pll(dev_priv, pipe);
4560 Serge 4277
	else if (!IS_VALLEYVIEW(dev))
4104 Serge 4278
	i9xx_disable_pll(dev_priv, pipe);
4279
 
2327 Serge 4280
    intel_crtc->active = false;
4560 Serge 4281
	intel_update_watermarks(crtc);
4282
 
2327 Serge 4283
    intel_update_fbc(dev);
4284
}
4285
 
3031 serge 4286
/* Gen2-4 has no per-crtc PLL resources to release, so .off is a no-op. */
static void i9xx_crtc_off(struct drm_crtc *crtc)
{
}
4289
 
3031 serge 4290
static void intel_crtc_update_sarea(struct drm_crtc *crtc,
4291
				    bool enabled)
2330 Serge 4292
{
4293
	struct drm_device *dev = crtc->dev;
4294
	struct drm_i915_master_private *master_priv;
4295
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4296
	int pipe = intel_crtc->pipe;
2327 Serge 4297
 
4298
 
2340 Serge 4299
#if 0
2330 Serge 4300
	if (!dev->primary->master)
4301
		return;
2327 Serge 4302
 
2330 Serge 4303
	master_priv = dev->primary->master->driver_priv;
4304
	if (!master_priv->sarea_priv)
4305
		return;
2327 Serge 4306
 
2330 Serge 4307
	switch (pipe) {
4308
	case 0:
4309
		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
4310
		master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
4311
		break;
4312
	case 1:
4313
		master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
4314
		master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
4315
		break;
4316
	default:
4317
		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
4318
		break;
4319
	}
2340 Serge 4320
#endif
4321
 
2330 Serge 4322
}
2327 Serge 4323
 
3031 serge 4324
/**
4325
 * Sets the power management mode of the pipe and plane.
4326
 */
4327
void intel_crtc_update_dpms(struct drm_crtc *crtc)
4328
{
4329
	struct drm_device *dev = crtc->dev;
4330
	struct drm_i915_private *dev_priv = dev->dev_private;
4331
	struct intel_encoder *intel_encoder;
4332
	bool enable = false;
4333
 
4334
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
4335
		enable |= intel_encoder->connectors_active;
4336
 
4337
	if (enable)
4338
		dev_priv->display.crtc_enable(crtc);
4339
	else
4340
		dev_priv->display.crtc_disable(crtc);
4341
 
4342
	intel_crtc_update_sarea(crtc, enable);
4343
}
4344
 
2330 Serge 4345
static void intel_crtc_disable(struct drm_crtc *crtc)
4346
{
4347
	struct drm_device *dev = crtc->dev;
3031 serge 4348
	struct drm_connector *connector;
4349
	struct drm_i915_private *dev_priv = dev->dev_private;
3480 Serge 4350
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2327 Serge 4351
 
3031 serge 4352
	/* crtc should still be enabled when we disable it. */
4353
	WARN_ON(!crtc->enabled);
2327 Serge 4354
 
4104 Serge 4355
	dev_priv->display.crtc_disable(crtc);
3480 Serge 4356
	intel_crtc->eld_vld = false;
3031 serge 4357
	intel_crtc_update_sarea(crtc, false);
4358
	dev_priv->display.off(crtc);
4359
 
4360
	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
4560 Serge 4361
	assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
3031 serge 4362
	assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
4363
 
4280 Serge 4364
	if (crtc->fb) {
4365
		mutex_lock(&dev->struct_mutex);
4366
		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
4367
		mutex_unlock(&dev->struct_mutex);
4368
		crtc->fb = NULL;
4369
	}
3031 serge 4370
 
4371
	/* Update computed state. */
4372
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
4373
		if (!connector->encoder || !connector->encoder->crtc)
4374
			continue;
4375
 
4376
		if (connector->encoder->crtc != crtc)
4377
			continue;
4378
 
4379
		connector->dpms = DRM_MODE_DPMS_OFF;
4380
		to_intel_encoder(connector->encoder)->connectors_active = false;
2330 Serge 4381
	}
4382
}
2327 Serge 4383
 
3031 serge 4384
/* Default encoder destructor: tear down the DRM encoder and free it. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
2327 Serge 4391
 
4104 Serge 4392
/* Simple dpms helper for encoders with just one connector, no cloning and only
3031 serge 4393
 * one kind of off state. It clamps all !ON modes to fully OFF and changes the
4394
 * state of the entire output pipe. */
4104 Serge 4395
static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
2330 Serge 4396
{
3031 serge 4397
	if (mode == DRM_MODE_DPMS_ON) {
4398
		encoder->connectors_active = true;
4399
 
4400
		intel_crtc_update_dpms(encoder->base.crtc);
4401
	} else {
4402
		encoder->connectors_active = false;
4403
 
4404
		intel_crtc_update_dpms(encoder->base.crtc);
4405
	}
2330 Serge 4406
}
2327 Serge 4407
 
3031 serge 4408
/* Cross check the actual hw state with our own modeset state tracking (and it's
4409
 * internal consistency). */
4410
static void intel_connector_check_state(struct intel_connector *connector)
2330 Serge 4411
{
3031 serge 4412
	if (connector->get_hw_state(connector)) {
4413
		struct intel_encoder *encoder = connector->encoder;
4414
		struct drm_crtc *crtc;
4415
		bool encoder_enabled;
4416
		enum pipe pipe;
4417
 
4418
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4419
			      connector->base.base.id,
4420
			      drm_get_connector_name(&connector->base));
4421
 
4422
		WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
4423
		     "wrong connector dpms state\n");
4424
		WARN(connector->base.encoder != &encoder->base,
4425
		     "active connector not linked to encoder\n");
4426
		WARN(!encoder->connectors_active,
4427
		     "encoder->connectors_active not set\n");
4428
 
4429
		encoder_enabled = encoder->get_hw_state(encoder, &pipe);
4430
		WARN(!encoder_enabled, "encoder not enabled\n");
4431
		if (WARN_ON(!encoder->base.crtc))
4432
			return;
4433
 
4434
		crtc = encoder->base.crtc;
4435
 
4436
		WARN(!crtc->enabled, "crtc not enabled\n");
4437
		WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
4438
		WARN(pipe != to_intel_crtc(crtc)->pipe,
4439
		     "encoder active on the wrong pipe\n");
4440
	}
2330 Serge 4441
}
2327 Serge 4442
 
3031 serge 4443
/* Even simpler default implementation, if there's really no special case to
4444
 * consider. */
4445
void intel_connector_dpms(struct drm_connector *connector, int mode)
2330 Serge 4446
{
3031 serge 4447
	/* All the simple cases only support two dpms states. */
4448
	if (mode != DRM_MODE_DPMS_ON)
4449
		mode = DRM_MODE_DPMS_OFF;
2342 Serge 4450
 
3031 serge 4451
	if (mode == connector->dpms)
4452
		return;
4453
 
4454
	connector->dpms = mode;
4455
 
4456
	/* Only need to change hw state when actually enabled */
4104 Serge 4457
	if (connector->encoder)
4458
		intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
3031 serge 4459
 
4460
	intel_modeset_check_state(connector->dev);
2330 Serge 4461
}
2327 Serge 4462
 
3031 serge 4463
/* Simple connector->get_hw_state implementation for encoders that support only
4464
 * one connector and no cloning and hence the encoder state determines the state
4465
 * of the connector. */
4466
bool intel_connector_get_hw_state(struct intel_connector *connector)
2330 Serge 4467
{
3031 serge 4468
	enum pipe pipe = 0;
4469
	struct intel_encoder *encoder = connector->encoder;
2330 Serge 4470
 
3031 serge 4471
	return encoder->get_hw_state(encoder, &pipe);
2330 Serge 4472
}
4473
 
4104 Serge 4474
static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
4475
				     struct intel_crtc_config *pipe_config)
4476
{
4477
	struct drm_i915_private *dev_priv = dev->dev_private;
4478
	struct intel_crtc *pipe_B_crtc =
4479
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
4480
 
4481
	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
4482
		      pipe_name(pipe), pipe_config->fdi_lanes);
4483
	if (pipe_config->fdi_lanes > 4) {
4484
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
4485
			      pipe_name(pipe), pipe_config->fdi_lanes);
4486
		return false;
4487
	}
4488
 
4560 Serge 4489
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
4104 Serge 4490
		if (pipe_config->fdi_lanes > 2) {
4491
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
4492
				      pipe_config->fdi_lanes);
4493
			return false;
4494
		} else {
4495
			return true;
4496
		}
4497
	}
4498
 
4499
	if (INTEL_INFO(dev)->num_pipes == 2)
4500
		return true;
4501
 
4502
	/* Ivybridge 3 pipe is really complicated */
4503
	switch (pipe) {
4504
	case PIPE_A:
4505
		return true;
4506
	case PIPE_B:
4507
		if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
4508
		    pipe_config->fdi_lanes > 2) {
4509
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
4510
				      pipe_name(pipe), pipe_config->fdi_lanes);
4511
			return false;
4512
		}
4513
		return true;
4514
	case PIPE_C:
4515
		if (!pipe_has_enabled_pch(pipe_B_crtc) ||
4516
		    pipe_B_crtc->config.fdi_lanes <= 2) {
4517
			if (pipe_config->fdi_lanes > 2) {
4518
				DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
4519
					      pipe_name(pipe), pipe_config->fdi_lanes);
4520
				return false;
4521
			}
4522
		} else {
4523
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
4524
			return false;
4525
		}
4526
		return true;
4527
	default:
4528
		BUG();
4529
	}
4530
}
4531
 
4532
#define RETRY 1
4533
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
3746 Serge 4534
				      struct intel_crtc_config *pipe_config)
2330 Serge 4535
{
4104 Serge 4536
	struct drm_device *dev = intel_crtc->base.dev;
3746 Serge 4537
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
4104 Serge 4538
	int lane, link_bw, fdi_dotclock;
4539
	bool setup_ok, needs_recompute = false;
2330 Serge 4540
 
4104 Serge 4541
retry:
4542
	/* FDI is a binary signal running at ~2.7GHz, encoding
4543
	 * each output octet as 10 bits. The actual frequency
4544
	 * is stored as a divider into a 100MHz clock, and the
4545
	 * mode pixel clock is stored in units of 1KHz.
4546
	 * Hence the bw of each lane in terms of the mode signal
4547
	 * is:
4548
	 */
4549
	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
4550
 
4560 Serge 4551
	fdi_dotclock = adjusted_mode->crtc_clock;
4104 Serge 4552
 
4553
	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
4554
					   pipe_config->pipe_bpp);
4555
 
4556
	pipe_config->fdi_lanes = lane;
4557
 
4558
	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
4559
			       link_bw, &pipe_config->fdi_m_n);
4560
 
4561
	setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
4562
					    intel_crtc->pipe, pipe_config);
4563
	if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
4564
		pipe_config->pipe_bpp -= 2*3;
4565
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
4566
			      pipe_config->pipe_bpp);
4567
		needs_recompute = true;
4568
		pipe_config->bw_constrained = true;
4569
 
4570
		goto retry;
4571
	}
4572
 
4573
	if (needs_recompute)
4574
		return RETRY;
4575
 
4576
	return setup_ok ? 0 : -EINVAL;
4577
}
4578
 
4579
static void hsw_compute_ips_config(struct intel_crtc *crtc,
4580
				   struct intel_crtc_config *pipe_config)
4581
{
4582
	pipe_config->ips_enabled = i915_enable_ips &&
4583
				   hsw_crtc_supports_ips(crtc) &&
4584
				   pipe_config->pipe_bpp <= 24;
4585
}
4586
 
4587
static int intel_crtc_compute_config(struct intel_crtc *crtc,
4588
				     struct intel_crtc_config *pipe_config)
4589
{
4590
	struct drm_device *dev = crtc->base.dev;
4591
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
4592
 
4560 Serge 4593
	/* FIXME should check pixel clock limits on all platforms */
4594
	if (INTEL_INFO(dev)->gen < 4) {
4595
		struct drm_i915_private *dev_priv = dev->dev_private;
4596
		int clock_limit =
4597
			dev_priv->display.get_display_clock_speed(dev);
4598
 
4599
		/*
4600
		 * Enable pixel doubling when the dot clock
4601
		 * is > 90% of the (display) core speed.
4602
		 *
4603
		 * GDG double wide on either pipe,
4604
		 * otherwise pipe A only.
4605
		 */
4606
		if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
4607
		    adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
4608
			clock_limit *= 2;
4609
			pipe_config->double_wide = true;
4610
		}
4611
 
4612
		if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
4104 Serge 4613
			return -EINVAL;
2330 Serge 4614
	}
4615
 
4560 Serge 4616
	/*
4617
	 * Pipe horizontal size must be even in:
4618
	 * - DVO ganged mode
4619
	 * - LVDS dual channel mode
4620
	 * - Double wide pipe
4621
	 */
4622
	if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
4623
	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
4624
		pipe_config->pipe_src_w &= ~1;
4625
 
4104 Serge 4626
	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
4627
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
3031 serge 4628
	 */
4629
	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
4630
		adjusted_mode->hsync_start == adjusted_mode->hdisplay)
4104 Serge 4631
		return -EINVAL;
3031 serge 4632
 
3746 Serge 4633
	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
4634
		pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
4635
	} else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
4636
		/* only a 8bpc pipe, with 6bpc dither through the panel fitter
4637
		 * for lvds. */
4638
		pipe_config->pipe_bpp = 8*3;
4639
	}
4640
 
4104 Serge 4641
	if (HAS_IPS(dev))
4642
		hsw_compute_ips_config(crtc, pipe_config);
4643
 
4644
	/* XXX: PCH clock sharing is done in ->mode_set, so make sure the old
4645
	 * clock survives for now. */
4646
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
4647
		pipe_config->shared_dpll = crtc->config.shared_dpll;
4648
 
4649
	if (pipe_config->has_pch_encoder)
4650
		return ironlake_fdi_compute_config(crtc, pipe_config);
4651
 
4652
	return 0;
2330 Serge 4653
}
4654
 
3031 serge 4655
/* Display core clock in kHz for Valleyview (placeholder value). */
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
	return 400000; /* FIXME */
}
4659
 
2327 Serge 4660
/* i945: fixed 400 MHz display core clock (value in kHz). */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}
4664
 
4665
/* i915: fixed 333 MHz display core clock (value in kHz). */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333000;
}
4669
 
4670
/* Fallback for remaining i9xx parts: 200 MHz core clock (in kHz). */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}
4674
 
4104 Serge 4675
static int pnv_get_display_clock_speed(struct drm_device *dev)
4676
{
4677
	u16 gcfgc = 0;
4678
 
4679
	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
4680
 
4681
	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
4682
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
4683
		return 267000;
4684
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
4685
		return 333000;
4686
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
4687
		return 444000;
4688
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
4689
		return 200000;
4690
	default:
4691
		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
4692
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
4693
		return 133000;
4694
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
4695
		return 167000;
4696
	}
4697
}
4698
 
2327 Serge 4699
static int i915gm_get_display_clock_speed(struct drm_device *dev)
4700
{
4701
	u16 gcfgc = 0;
4702
 
4703
	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
4704
 
4705
	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
4706
		return 133000;
4707
	else {
4708
		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
4709
		case GC_DISPLAY_CLOCK_333_MHZ:
4710
			return 333000;
4711
		default:
4712
		case GC_DISPLAY_CLOCK_190_200_MHZ:
4713
			return 190000;
4714
		}
4715
	}
4716
}
4717
 
4718
/* i865: fixed 266 MHz display core clock (value in kHz). */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266000;
}
4722
 
4723
static int i855_get_display_clock_speed(struct drm_device *dev)
4724
{
4725
	u16 hpllcc = 0;
4726
	/* Assume that the hardware is in the high speed state.  This
4727
	 * should be the default.
4728
	 */
4729
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
4730
	case GC_CLOCK_133_200:
4731
	case GC_CLOCK_100_200:
4732
		return 200000;
4733
	case GC_CLOCK_166_250:
4734
		return 250000;
4735
	case GC_CLOCK_100_133:
4736
		return 133000;
4737
	}
4738
 
4739
	/* Shouldn't happen */
4740
	return 0;
4741
}
4742
 
4743
/* i830: fixed 133 MHz display core clock (value in kHz). */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133000;
}
4747
 
4748
static void
3746 Serge 4749
intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
2327 Serge 4750
{
3746 Serge 4751
	while (*num > DATA_LINK_M_N_MASK ||
4752
	       *den > DATA_LINK_M_N_MASK) {
2327 Serge 4753
		*num >>= 1;
4754
		*den >>= 1;
4755
	}
4756
}
4757
 
3746 Serge 4758
static void compute_m_n(unsigned int m, unsigned int n,
4759
			uint32_t *ret_m, uint32_t *ret_n)
4760
{
4761
	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
4762
	*ret_m = div_u64((uint64_t) m * *ret_n, n);
4763
	intel_reduce_m_n_ratio(ret_m, ret_n);
4764
}
4765
 
3480 Serge 4766
void
4767
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
4768
		       int pixel_clock, int link_clock,
4769
		       struct intel_link_m_n *m_n)
2327 Serge 4770
{
3480 Serge 4771
	m_n->tu = 64;
3746 Serge 4772
 
4773
	compute_m_n(bits_per_pixel * pixel_clock,
4774
		    link_clock * nlanes * 8,
4775
		    &m_n->gmch_m, &m_n->gmch_n);
4776
 
4777
	compute_m_n(pixel_clock, link_clock,
4778
		    &m_n->link_m, &m_n->link_n);
2327 Serge 4779
}
4780
 
4781
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4782
{
2342 Serge 4783
	if (i915_panel_use_ssc >= 0)
4784
		return i915_panel_use_ssc != 0;
4104 Serge 4785
	return dev_priv->vbt.lvds_use_ssc
2327 Serge 4786
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4787
}
4788
 
3031 serge 4789
static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
4790
{
4791
	struct drm_device *dev = crtc->dev;
4792
	struct drm_i915_private *dev_priv = dev->dev_private;
4793
	int refclk;
2327 Serge 4794
 
3031 serge 4795
	if (IS_VALLEYVIEW(dev)) {
4560 Serge 4796
		refclk = 100000;
3031 serge 4797
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4798
	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4560 Serge 4799
		refclk = dev_priv->vbt.lvds_ssc_freq;
4800
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
3031 serge 4801
	} else if (!IS_GEN2(dev)) {
4802
		refclk = 96000;
4803
	} else {
4804
		refclk = 48000;
4805
	}
2327 Serge 4806
 
3031 serge 4807
	return refclk;
4808
}
2327 Serge 4809
 
4104 Serge 4810
static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
3031 serge 4811
{
4104 Serge 4812
	return (1 << dpll->n) << 16 | dpll->m2;
4813
}
3746 Serge 4814
 
4104 Serge 4815
static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
4816
{
4817
	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
3031 serge 4818
}
2327 Serge 4819
 
3746 Serge 4820
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
3031 serge 4821
				     intel_clock_t *reduced_clock)
4822
{
3746 Serge 4823
	struct drm_device *dev = crtc->base.dev;
3031 serge 4824
	struct drm_i915_private *dev_priv = dev->dev_private;
3746 Serge 4825
	int pipe = crtc->pipe;
3031 serge 4826
	u32 fp, fp2 = 0;
2327 Serge 4827
 
3031 serge 4828
	if (IS_PINEVIEW(dev)) {
4104 Serge 4829
		fp = pnv_dpll_compute_fp(&crtc->config.dpll);
3031 serge 4830
		if (reduced_clock)
4104 Serge 4831
			fp2 = pnv_dpll_compute_fp(reduced_clock);
3031 serge 4832
	} else {
4104 Serge 4833
		fp = i9xx_dpll_compute_fp(&crtc->config.dpll);
3031 serge 4834
		if (reduced_clock)
4104 Serge 4835
			fp2 = i9xx_dpll_compute_fp(reduced_clock);
3031 serge 4836
	}
2327 Serge 4837
 
3031 serge 4838
	I915_WRITE(FP0(pipe), fp);
4104 Serge 4839
	crtc->config.dpll_hw_state.fp0 = fp;
2327 Serge 4840
 
3746 Serge 4841
	crtc->lowfreq_avail = false;
4842
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
3031 serge 4843
	    reduced_clock && i915_powersave) {
4844
		I915_WRITE(FP1(pipe), fp2);
4104 Serge 4845
		crtc->config.dpll_hw_state.fp1 = fp2;
3746 Serge 4846
		crtc->lowfreq_avail = true;
3031 serge 4847
	} else {
4848
		I915_WRITE(FP1(pipe), fp);
4104 Serge 4849
		crtc->config.dpll_hw_state.fp1 = fp;
3031 serge 4850
	}
4851
}
2327 Serge 4852
 
4560 Serge 4853
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
4854
		pipe)
4104 Serge 4855
{
4856
	u32 reg_val;
4857
 
4858
	/*
4859
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
4860
	 * and set it to a reasonable value instead.
4861
	 */
4560 Serge 4862
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
4104 Serge 4863
	reg_val &= 0xffffff00;
4864
	reg_val |= 0x00000030;
4560 Serge 4865
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
4104 Serge 4866
 
4560 Serge 4867
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
4104 Serge 4868
	reg_val &= 0x8cffffff;
4869
	reg_val = 0x8c000000;
4560 Serge 4870
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
4104 Serge 4871
 
4560 Serge 4872
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
4104 Serge 4873
	reg_val &= 0xffffff00;
4560 Serge 4874
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
4104 Serge 4875
 
4560 Serge 4876
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
4104 Serge 4877
	reg_val &= 0x00ffffff;
4878
	reg_val |= 0xb0000000;
4560 Serge 4879
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
4104 Serge 4880
}
4881
 
4882
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
4883
					 struct intel_link_m_n *m_n)
4884
{
4885
	struct drm_device *dev = crtc->base.dev;
4886
	struct drm_i915_private *dev_priv = dev->dev_private;
4887
	int pipe = crtc->pipe;
4888
 
4889
	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
4890
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
4891
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
4892
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
4893
}
4894
 
4895
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
4896
					 struct intel_link_m_n *m_n)
4897
{
4898
	struct drm_device *dev = crtc->base.dev;
4899
	struct drm_i915_private *dev_priv = dev->dev_private;
4900
	int pipe = crtc->pipe;
4901
	enum transcoder transcoder = crtc->config.cpu_transcoder;
4902
 
4903
	if (INTEL_INFO(dev)->gen >= 5) {
4904
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
4905
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
4906
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
4907
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
4908
	} else {
4909
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
4910
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
4911
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
4912
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
4913
	}
4914
}
4915
 
3746 Serge 4916
static void intel_dp_set_m_n(struct intel_crtc *crtc)
3031 serge 4917
{
3746 Serge 4918
	if (crtc->config.has_pch_encoder)
4919
		intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
4920
	else
4921
		intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
4922
}
4923
 
4924
static void vlv_update_pll(struct intel_crtc *crtc)
4925
{
4926
	struct drm_device *dev = crtc->base.dev;
3031 serge 4927
	struct drm_i915_private *dev_priv = dev->dev_private;
3746 Serge 4928
	int pipe = crtc->pipe;
4104 Serge 4929
	u32 dpll, mdiv;
3031 serge 4930
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
4104 Serge 4931
	u32 coreclk, reg_val, dpll_md;
2327 Serge 4932
 
3480 Serge 4933
	mutex_lock(&dev_priv->dpio_lock);
4934
 
3746 Serge 4935
	bestn = crtc->config.dpll.n;
4936
	bestm1 = crtc->config.dpll.m1;
4937
	bestm2 = crtc->config.dpll.m2;
4938
	bestp1 = crtc->config.dpll.p1;
4939
	bestp2 = crtc->config.dpll.p2;
3031 serge 4940
 
4104 Serge 4941
	/* See eDP HDMI DPIO driver vbios notes doc */
4942
 
4943
	/* PLL B needs special handling */
4944
	if (pipe)
4560 Serge 4945
		vlv_pllb_recal_opamp(dev_priv, pipe);
4104 Serge 4946
 
4947
	/* Set up Tx target for periodic Rcomp update */
4560 Serge 4948
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
4104 Serge 4949
 
4950
	/* Disable target IRef on PLL */
4560 Serge 4951
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
4104 Serge 4952
	reg_val &= 0x00ffffff;
4560 Serge 4953
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
4104 Serge 4954
 
4955
	/* Disable fast lock */
4560 Serge 4956
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
4104 Serge 4957
 
4958
	/* Set idtafcrecal before PLL is enabled */
3031 serge 4959
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
4960
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
4961
	mdiv |= ((bestn << DPIO_N_SHIFT));
4962
	mdiv |= (1 << DPIO_K_SHIFT);
4104 Serge 4963
 
4964
	/*
4965
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
4966
	 * but we don't support that).
4967
	 * Note: don't use the DAC post divider as it seems unstable.
4968
	 */
4969
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
4560 Serge 4970
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
4104 Serge 4971
 
3031 serge 4972
	mdiv |= DPIO_ENABLE_CALIBRATION;
4560 Serge 4973
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
3031 serge 4974
 
4104 Serge 4975
	/* Set HBR and RBR LPF coefficients */
4976
	if (crtc->config.port_clock == 162000 ||
4977
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
4978
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
4560 Serge 4979
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
4104 Serge 4980
				 0x009f0003);
4981
	else
4560 Serge 4982
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
4104 Serge 4983
				 0x00d0000f);
3031 serge 4984
 
4104 Serge 4985
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
4986
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
4987
		/* Use SSC source */
4988
		if (!pipe)
4560 Serge 4989
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
4104 Serge 4990
					 0x0df40000);
4991
		else
4560 Serge 4992
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
4104 Serge 4993
					 0x0df70000);
4994
	} else { /* HDMI or VGA */
4995
		/* Use bend source */
4996
		if (!pipe)
4560 Serge 4997
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
4104 Serge 4998
					 0x0df70000);
4999
		else
4560 Serge 5000
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
4104 Serge 5001
					 0x0df40000);
5002
	}
3031 serge 5003
 
4560 Serge 5004
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
4104 Serge 5005
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
5006
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
5007
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
5008
		coreclk |= 0x01000000;
4560 Serge 5009
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
3031 serge 5010
 
4560 Serge 5011
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
4104 Serge 5012
 
4560 Serge 5013
	/*
5014
	 * Enable DPIO clock input. We should never disable the reference
5015
	 * clock for pipe B, since VGA hotplug / manual detection depends
5016
	 * on it.
5017
	 */
4104 Serge 5018
	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
5019
		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
4398 Serge 5020
	/* We should never disable this, set it here for state tracking */
5021
	if (pipe == PIPE_B)
4104 Serge 5022
		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
3031 serge 5023
	dpll |= DPLL_VCO_ENABLE;
4104 Serge 5024
	crtc->config.dpll_hw_state.dpll = dpll;
3031 serge 5025
 
4104 Serge 5026
	dpll_md = (crtc->config.pixel_multiplier - 1)
5027
		<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
5028
	crtc->config.dpll_hw_state.dpll_md = dpll_md;
3031 serge 5029
 
3746 Serge 5030
	if (crtc->config.has_dp_encoder)
5031
		intel_dp_set_m_n(crtc);
3243 Serge 5032
 
3480 Serge 5033
	mutex_unlock(&dev_priv->dpio_lock);
3031 serge 5034
}
5035
 
3746 Serge 5036
static void i9xx_update_pll(struct intel_crtc *crtc,
5037
			    intel_clock_t *reduced_clock,
3031 serge 5038
			    int num_connectors)
5039
{
3746 Serge 5040
	struct drm_device *dev = crtc->base.dev;
3031 serge 5041
	struct drm_i915_private *dev_priv = dev->dev_private;
5042
	u32 dpll;
5043
	bool is_sdvo;
3746 Serge 5044
	struct dpll *clock = &crtc->config.dpll;
3031 serge 5045
 
3746 Serge 5046
	i9xx_update_pll_dividers(crtc, reduced_clock);
3243 Serge 5047
 
3746 Serge 5048
	is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
5049
		intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
3031 serge 5050
 
5051
	dpll = DPLL_VGA_MODE_DIS;
5052
 
3746 Serge 5053
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS))
3031 serge 5054
		dpll |= DPLLB_MODE_LVDS;
5055
	else
5056
		dpll |= DPLLB_MODE_DAC_SERIAL;
3746 Serge 5057
 
4104 Serge 5058
	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
3746 Serge 5059
			dpll |= (crtc->config.pixel_multiplier - 1)
5060
				<< SDVO_MULTIPLIER_SHIFT_HIRES;
2342 Serge 5061
		}
4104 Serge 5062
 
5063
	if (is_sdvo)
5064
		dpll |= DPLL_SDVO_HIGH_SPEED;
5065
 
3746 Serge 5066
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
4104 Serge 5067
		dpll |= DPLL_SDVO_HIGH_SPEED;
2342 Serge 5068
 
3031 serge 5069
	/* compute bitmask from p1 value */
5070
	if (IS_PINEVIEW(dev))
5071
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
5072
	else {
5073
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5074
		if (IS_G4X(dev) && reduced_clock)
5075
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5076
	}
5077
	switch (clock->p2) {
5078
	case 5:
5079
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5080
		break;
5081
	case 7:
5082
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5083
		break;
5084
	case 10:
5085
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5086
		break;
5087
	case 14:
5088
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5089
		break;
5090
	}
5091
	if (INTEL_INFO(dev)->gen >= 4)
5092
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
2327 Serge 5093
 
4104 Serge 5094
	if (crtc->config.sdvo_tv_clock)
3031 serge 5095
		dpll |= PLL_REF_INPUT_TVCLKINBC;
3746 Serge 5096
	else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
3031 serge 5097
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5098
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5099
	else
5100
		dpll |= PLL_REF_INPUT_DREFCLK;
2327 Serge 5101
 
3031 serge 5102
	dpll |= DPLL_VCO_ENABLE;
4104 Serge 5103
	crtc->config.dpll_hw_state.dpll = dpll;
2327 Serge 5104
 
4104 Serge 5105
	if (INTEL_INFO(dev)->gen >= 4) {
5106
		u32 dpll_md = (crtc->config.pixel_multiplier - 1)
5107
					<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
5108
		crtc->config.dpll_hw_state.dpll_md = dpll_md;
5109
	}
2327 Serge 5110
 
3746 Serge 5111
	if (crtc->config.has_dp_encoder)
5112
		intel_dp_set_m_n(crtc);
3031 serge 5113
}
2327 Serge 5114
 
3746 Serge 5115
static void i8xx_update_pll(struct intel_crtc *crtc,
5116
			    intel_clock_t *reduced_clock,
3031 serge 5117
			    int num_connectors)
5118
{
3746 Serge 5119
	struct drm_device *dev = crtc->base.dev;
3031 serge 5120
	struct drm_i915_private *dev_priv = dev->dev_private;
5121
	u32 dpll;
3746 Serge 5122
	struct dpll *clock = &crtc->config.dpll;
2327 Serge 5123
 
3746 Serge 5124
	i9xx_update_pll_dividers(crtc, reduced_clock);
3243 Serge 5125
 
3031 serge 5126
	dpll = DPLL_VGA_MODE_DIS;
2327 Serge 5127
 
3746 Serge 5128
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) {
3031 serge 5129
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5130
	} else {
5131
		if (clock->p1 == 2)
5132
			dpll |= PLL_P1_DIVIDE_BY_TWO;
5133
		else
5134
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5135
		if (clock->p2 == 4)
5136
			dpll |= PLL_P2_DIVIDE_BY_4;
5137
	}
2327 Serge 5138
 
4104 Serge 5139
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
5140
		dpll |= DPLL_DVO_2X_MODE;
5141
 
3746 Serge 5142
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
3031 serge 5143
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5144
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5145
	else
5146
		dpll |= PLL_REF_INPUT_DREFCLK;
5147
 
5148
	dpll |= DPLL_VCO_ENABLE;
4104 Serge 5149
	crtc->config.dpll_hw_state.dpll = dpll;
3031 serge 5150
}
5151
 
4104 Serge 5152
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
3243 Serge 5153
{
5154
	struct drm_device *dev = intel_crtc->base.dev;
5155
	struct drm_i915_private *dev_priv = dev->dev_private;
5156
	enum pipe pipe = intel_crtc->pipe;
3746 Serge 5157
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
4104 Serge 5158
	struct drm_display_mode *adjusted_mode =
5159
		&intel_crtc->config.adjusted_mode;
5160
	uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end;
3243 Serge 5161
 
4104 Serge 5162
	/* We need to be careful not to changed the adjusted mode, for otherwise
5163
	 * the hw state checker will get angry at the mismatch. */
5164
	crtc_vtotal = adjusted_mode->crtc_vtotal;
5165
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;
5166
 
3243 Serge 5167
	if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5168
		/* the chip adds 2 halflines automatically */
4104 Serge 5169
		crtc_vtotal -= 1;
5170
		crtc_vblank_end -= 1;
3243 Serge 5171
		vsyncshift = adjusted_mode->crtc_hsync_start
5172
			     - adjusted_mode->crtc_htotal / 2;
5173
	} else {
5174
		vsyncshift = 0;
5175
	}
5176
 
5177
	if (INTEL_INFO(dev)->gen > 3)
5178
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
5179
 
5180
	I915_WRITE(HTOTAL(cpu_transcoder),
5181
		   (adjusted_mode->crtc_hdisplay - 1) |
5182
		   ((adjusted_mode->crtc_htotal - 1) << 16));
5183
	I915_WRITE(HBLANK(cpu_transcoder),
5184
		   (adjusted_mode->crtc_hblank_start - 1) |
5185
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
5186
	I915_WRITE(HSYNC(cpu_transcoder),
5187
		   (adjusted_mode->crtc_hsync_start - 1) |
5188
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
5189
 
5190
	I915_WRITE(VTOTAL(cpu_transcoder),
5191
		   (adjusted_mode->crtc_vdisplay - 1) |
4104 Serge 5192
		   ((crtc_vtotal - 1) << 16));
3243 Serge 5193
	I915_WRITE(VBLANK(cpu_transcoder),
5194
		   (adjusted_mode->crtc_vblank_start - 1) |
4104 Serge 5195
		   ((crtc_vblank_end - 1) << 16));
3243 Serge 5196
	I915_WRITE(VSYNC(cpu_transcoder),
5197
		   (adjusted_mode->crtc_vsync_start - 1) |
5198
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
5199
 
5200
	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
5201
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
5202
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
5203
	 * bits. */
5204
	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
5205
	    (pipe == PIPE_B || pipe == PIPE_C))
5206
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
5207
 
5208
	/* pipesrc controls the size that is scaled from, which should
5209
	 * always be the user's requested size.
5210
	 */
5211
	I915_WRITE(PIPESRC(pipe),
4560 Serge 5212
		   ((intel_crtc->config.pipe_src_w - 1) << 16) |
5213
		   (intel_crtc->config.pipe_src_h - 1));
3243 Serge 5214
}
5215
 
4104 Serge 5216
static void intel_get_pipe_timings(struct intel_crtc *crtc,
5217
				   struct intel_crtc_config *pipe_config)
5218
{
5219
	struct drm_device *dev = crtc->base.dev;
5220
	struct drm_i915_private *dev_priv = dev->dev_private;
5221
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
5222
	uint32_t tmp;
5223
 
5224
	tmp = I915_READ(HTOTAL(cpu_transcoder));
5225
	pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
5226
	pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
5227
	tmp = I915_READ(HBLANK(cpu_transcoder));
5228
	pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
5229
	pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
5230
	tmp = I915_READ(HSYNC(cpu_transcoder));
5231
	pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
5232
	pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
5233
 
5234
	tmp = I915_READ(VTOTAL(cpu_transcoder));
5235
	pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
5236
	pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
5237
	tmp = I915_READ(VBLANK(cpu_transcoder));
5238
	pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
5239
	pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
5240
	tmp = I915_READ(VSYNC(cpu_transcoder));
5241
	pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
5242
	pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
5243
 
5244
	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
5245
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
5246
		pipe_config->adjusted_mode.crtc_vtotal += 1;
5247
		pipe_config->adjusted_mode.crtc_vblank_end += 1;
5248
	}
5249
 
5250
	tmp = I915_READ(PIPESRC(crtc->pipe));
4560 Serge 5251
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
5252
	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
5253
 
5254
	pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h;
5255
	pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
4104 Serge 5256
}
5257
 
5258
static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
5259
					     struct intel_crtc_config *pipe_config)
5260
{
5261
	struct drm_crtc *crtc = &intel_crtc->base;
5262
 
5263
	crtc->mode.hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
5264
	crtc->mode.htotal = pipe_config->adjusted_mode.crtc_htotal;
5265
	crtc->mode.hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
5266
	crtc->mode.hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
5267
 
5268
	crtc->mode.vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
5269
	crtc->mode.vtotal = pipe_config->adjusted_mode.crtc_vtotal;
5270
	crtc->mode.vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
5271
	crtc->mode.vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
5272
 
5273
	crtc->mode.flags = pipe_config->adjusted_mode.flags;
5274
 
4560 Serge 5275
	crtc->mode.clock = pipe_config->adjusted_mode.crtc_clock;
4104 Serge 5276
	crtc->mode.flags |= pipe_config->adjusted_mode.flags;
5277
}
5278
 
3746 Serge 5279
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
5280
{
5281
	struct drm_device *dev = intel_crtc->base.dev;
5282
	struct drm_i915_private *dev_priv = dev->dev_private;
5283
	uint32_t pipeconf;
5284
 
4104 Serge 5285
	pipeconf = 0;
3746 Serge 5286
 
4104 Serge 5287
	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
5288
	    I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
5289
		pipeconf |= PIPECONF_ENABLE;
5290
 
4560 Serge 5291
	if (intel_crtc->config.double_wide)
3746 Serge 5292
			pipeconf |= PIPECONF_DOUBLE_WIDE;
5293
 
4104 Serge 5294
	/* only g4x and later have fancy bpc/dither controls */
5295
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
5296
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
5297
		if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30)
5298
			pipeconf |= PIPECONF_DITHER_EN |
3746 Serge 5299
				    PIPECONF_DITHER_TYPE_SP;
5300
 
4104 Serge 5301
		switch (intel_crtc->config.pipe_bpp) {
5302
		case 18:
5303
			pipeconf |= PIPECONF_6BPC;
5304
			break;
5305
		case 24:
5306
			pipeconf |= PIPECONF_8BPC;
5307
			break;
5308
		case 30:
5309
			pipeconf |= PIPECONF_10BPC;
5310
			break;
5311
		default:
5312
			/* Case prevented by intel_choose_pipe_bpp_dither. */
5313
			BUG();
3746 Serge 5314
		}
5315
	}
5316
 
5317
	if (HAS_PIPE_CXSR(dev)) {
5318
		if (intel_crtc->lowfreq_avail) {
5319
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
5320
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
5321
		} else {
5322
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
5323
		}
5324
	}
5325
 
5326
	if (!IS_GEN2(dev) &&
5327
	    intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
5328
		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
5329
	else
5330
		pipeconf |= PIPECONF_PROGRESSIVE;
5331
 
4104 Serge 5332
	if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range)
3746 Serge 5333
			pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
5334
 
5335
	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
5336
	POSTING_READ(PIPECONF(intel_crtc->pipe));
5337
}
5338
 
3031 serge 5339
/*
 * Program the pipe/plane/DPLL for a mode set on gen2..gen4 and Valleyview.
 *
 * Scans the encoders on this CRTC to classify the output (LVDS, DSI),
 * computes DPLL divisors unless the encoder already fixed them
 * (config.clock_set), programs the PLL, pipe timings, plane size and
 * PIPECONF, and finally sets the framebuffer base.
 *
 * Returns 0 on success or -EINVAL if no PLL divisors can be found.
 * NOTE(review): on DSI the pipe clock comes from the DSI PLL, hence the
 * whole DPLL computation is skipped via skip_dpll.
 */
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
			      int x, int y,
			      struct drm_framebuffer *fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	int refclk, num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	u32 dspcntr;
	bool ok, has_reduced_clock = false;
	bool is_lvds = false, is_dsi = false;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;
	int ret;

	/* Classify the outputs driven by this CRTC. */
	for_each_encoder_on_crtc(dev, crtc, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_DSI:
			is_dsi = true;
			break;
		}

		num_connectors++;
	}

	/* DSI uses its own PLL; no display DPLL programming needed. */
	if (is_dsi)
		goto skip_dpll;

	if (!intel_crtc->config.clock_set) {
		refclk = i9xx_get_refclk(crtc, num_connectors);

		/*
		 * Returns a set of divisors for the desired target clock with
		 * the given refclk, or FALSE.  The returned values represent
		 * the clock equation: refclk * (5 * (m1 + 2) + (m2 + 2)) / (n +
		 * 2) / p1 / p2.
		 */
		limit = intel_limit(crtc, refclk);
		ok = dev_priv->display.find_dpll(limit, crtc,
						 intel_crtc->config.port_clock,
						 refclk, NULL, &clock);
		if (!ok) {
			DRM_ERROR("Couldn't find PLL settings for mode!\n");
			return -EINVAL;
		}

		if (is_lvds && dev_priv->lvds_downclock_avail) {
			/*
			 * Ensure we match the reduced clock's P to the target
			 * clock.  If the clocks don't match, we can't switch
			 * the display clock by using the FP0/FP1. In such case
			 * we will disable the LVDS downclock feature.
			 */
			has_reduced_clock =
				dev_priv->display.find_dpll(limit, crtc,
							    dev_priv->lvds_downclock,
							    refclk, &clock,
							    &reduced_clock);
		}
		/* Compat-code for transition, will disappear. */
		intel_crtc->config.dpll.n = clock.n;
		intel_crtc->config.dpll.m1 = clock.m1;
		intel_crtc->config.dpll.m2 = clock.m2;
		intel_crtc->config.dpll.p1 = clock.p1;
		intel_crtc->config.dpll.p2 = clock.p2;
	}

	/* Program the PLL with the chosen divisors. */
	if (IS_GEN2(dev)) {
		i8xx_update_pll(intel_crtc,
				has_reduced_clock ? &reduced_clock : NULL,
				num_connectors);
	} else if (IS_VALLEYVIEW(dev)) {
		vlv_update_pll(intel_crtc);
	} else {
		i9xx_update_pll(intel_crtc,
				has_reduced_clock ? &reduced_clock : NULL,
				num_connectors);
	}

skip_dpll:
	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;

	/* Pre-VLV the plane must explicitly select its pipe. */
	if (!IS_VALLEYVIEW(dev)) {
		if (pipe == 0)
			dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
		else
			dspcntr |= DISPPLANE_SEL_PIPE_B;
	}

	intel_set_pipe_timings(intel_crtc);

	/* pipesrc and dspsize control the size that is scaled from,
	 * which should always be the user's requested size.
	 */
	I915_WRITE(DSPSIZE(plane),
		   ((intel_crtc->config.pipe_src_h - 1) << 16) |
		   (intel_crtc->config.pipe_src_w - 1));
	I915_WRITE(DSPPOS(plane), 0);

	i9xx_set_pipeconf(intel_crtc);

	I915_WRITE(DSPCNTR(plane), dspcntr);
	POSTING_READ(DSPCNTR(plane));

	ret = intel_pipe_set_base(crtc, x, y, fb);

	return ret;
}
5454
 
4104 Serge 5455
/*
 * Read back the GMCH panel fitter state into @pipe_config.
 *
 * Bails out early when the platform has no pfit for this CRTC, when the
 * fitter is disabled, or when it is attached to a different pipe; only
 * then are the control/ratio registers captured.
 */
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
				 struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	/* Gen <= 3: only mobile parts (except i830) have a panel fitter. */
	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
		return;

	tmp = I915_READ(PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (INTEL_INFO(dev)->gen < 4) {
		/* Pre-gen4 the fitter is hardwired to pipe B. */
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	pipe_config->gmch_pfit.control = tmp;
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
	/* LVDS border bits only exist before gen5. */
	if (INTEL_INFO(dev)->gen < 5)
		pipe_config->gmch_pfit.lvds_border_bits =
			I915_READ(LVDS) & LVDS_BORDER_ENABLE;
}
5484
 
4398 Serge 5485
/*
 * Compute the port clock of a Valleyview pipe from the DPIO PLL divisors.
 *
 * Reads VLV_PLL_DW3 under the DPIO lock, unpacks the m1/m2/n/p1/p2
 * fields, and lets vlv_clock() derive the dot clock from the fixed
 * 100 MHz reference.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	intel_clock_t clock;
	u32 mdiv;
	int refclk = 100000;	/* VLV DPIO reference: 100 MHz (in kHz) */

	mutex_lock(&dev_priv->dpio_lock);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	mutex_unlock(&dev_priv->dpio_lock);

	/* Unpack the divisor fields from the DW3 dword. */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	vlv_clock(refclk, &clock);

	/* clock.dot is the fast clock */
	pipe_config->port_clock = clock.dot / 5;
}
5510
 
3746 Serge 5511
/*
 * Read back the hardware state of a gen2..gen4/VLV pipe into @pipe_config.
 *
 * Returns false if the pipe is disabled (PIPECONF_ENABLE clear), true
 * once timings, pfit, pixel multiplier, DPLL state and clock have been
 * captured.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	/* On these platforms the CPU transcoder is tied 1:1 to the pipe. */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	/* Only G4X and VLV expose the pipe bpc in PIPECONF. */
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if (INTEL_INFO(dev)->gen < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	/* Recover the pixel multiplier; its location varies by generation. */
	if (INTEL_INFO(dev)->gen >= 4) {
		tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev)) {
		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_VALLEYVIEW(dev))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	return true;
}
5583
 
3243 Serge 5584
/*
 * Configure the PCH display reference clock (PCH_DREF_CONTROL) on
 * Ironlake-class platforms (IBX/CPT).
 *
 * First scans all encoders to work out what consumers exist (LVDS/eDP
 * panel, CPU-attached eDP) and whether an external CK505 clock chip and
 * SSC are usable, then computes the desired final register value, and —
 * only if it differs from the current one — walks the hardware through
 * the mandated slow enable/disable sequence (each step posted and
 * followed by a 200 us delay).
 */
static void ironlake_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;

	/* We need to take the global config into account */
	list_for_each_entry(encoder, &mode_config->encoder_list,
			    base.head) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
				has_cpu_edp = true;
			break;
		}
	}

	if (HAS_PCH_IBX(dev)) {
		/* IBX: CK505 presence comes from the VBT; SSC needs CK505. */
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
		      has_panel, has_lvds, has_ck505);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else {
		final |= DREF_SSC_SOURCE_DISABLE;
		final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	}

	/* Nothing to do — avoid touching the hardware at all. */
	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output  */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			}
			else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling SSC entirely\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		/* Turn off the SSC source */
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_DISABLE;

		/* Turn off SSC1 */
		val &= ~DREF_SSC1_ENABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	}

	/* The step-wise sequence must land exactly on the precomputed state. */
	BUG_ON(val != final);
}
5732
 
4104 Serge 5733
/*
 * Pulse the FDI mPHY reset line via SOUTH_CHICKEN2: assert the reset
 * control bit, wait (up to 100 us) for the status bit to latch, then
 * de-assert and wait for the status bit to clear.  Timeouts are logged
 * but not treated as fatal.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	/* Assert reset. */
	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
			       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	/* De-assert reset. */
	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
				FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
3243 Serge 5753
 
4104 Serge 5754
/* WaMPhyProgramming:hsw
 *
 * Program the FDI mPHY tuning registers over the sideband (SBI_MPHY)
 * interface.  Each step is a read-modify-write of one mPHY register;
 * registers come in per-lane pairs (0x2xxx / 0x21xx variants).  The
 * exact addresses and values are mandated by the workaround — do not
 * reorder or coalesce these writes.
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
3243 Serge 5828
 
4104 Serge 5829
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 *
 * All SBI accesses happen under the dpio lock.  Invalid parameter
 * combinations (FDI without spread, FDI on an LP PCH) are corrected with
 * a WARN rather than rejected.
 */
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
				 bool with_fdi)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg, tmp;

	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
		 with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->dpio_lock);

	/* Un-disable the SSC block but keep the path alternate asserted. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* LP PCH uses SBI_GEN0, full-size PCH uses SBI_DBUFF0. */
	reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
	       SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->dpio_lock);
}
5875
 
4104 Serge 5876
/* Sequence to disable CLKOUT_DP.
 *
 * Clears the buffer-enable override, then — if the SSC block is not
 * already disabled — asserts the path alternate first (with a settle
 * delay) before setting the disable bit, as the hardware sequence
 * requires.  All SBI accesses happen under the dpio lock.
 */
static void lpt_disable_clkout_dp(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg, tmp;

	mutex_lock(&dev_priv->dpio_lock);

	/* LP PCH uses SBI_GEN0, full-size PCH uses SBI_DBUFF0. */
	reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
	       SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->dpio_lock);
}
5903
 
5904
static void lpt_init_pch_refclk(struct drm_device *dev)
5905
{
5906
	struct drm_mode_config *mode_config = &dev->mode_config;
5907
	struct intel_encoder *encoder;
5908
	bool has_vga = false;
5909
 
5910
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5911
		switch (encoder->type) {
5912
		case INTEL_OUTPUT_ANALOG:
5913
			has_vga = true;
5914
			break;
5915
		}
5916
	}
5917
 
5918
	if (has_vga)
5919
		lpt_enable_clkout_dp(dev, true, true);
5920
	else
5921
		lpt_disable_clkout_dp(dev);
5922
}
5923
 
3243 Serge 5924
/*
 * Initialize reference clocks when the driver loads.
 *
 * Dispatches to the per-PCH-generation setup routine; platforms without
 * a recognized PCH need no reference clock programming here.
 */
void intel_init_pch_refclk(struct drm_device *dev)
{
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		ironlake_init_pch_refclk(dev);
		return;
	}

	if (HAS_PCH_LPT(dev))
		lpt_init_pch_refclk(dev);
}
5934
 
2342 Serge 5935
/*
 * Pick the DPLL reference clock (in kHz) for an Ironlake CRTC.
 *
 * Uses the VBT-provided SSC reference when the single output on the
 * pipe is LVDS and SSC is enabled; otherwise the standard 120 MHz
 * reference.
 */
static int ironlake_get_refclk(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	int num_connectors = 0;
	bool is_lvds = false;

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		}
		num_connectors++;
	}

	/* SSC is only usable when LVDS is the sole output on this CRTC. */
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
			      dev_priv->vbt.lvds_ssc_freq);
		return dev_priv->vbt.lvds_ssc_freq;
	}

	return 120000;
}
5960
 
4104 Serge 5961
/*
 * Program PIPECONF for an Ironlake-class pipe from the staged CRTC
 * config: bpc, dithering, interlace mode and color range.
 */
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t val;

	val = 0;

	switch (intel_crtc->config.pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (intel_crtc->config.dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	/* Limited (16-235) RGB range for e.g. HDMI/TV outputs. */
	if (intel_crtc->config.limited_color_range)
		val |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(pipe), val);
	POSTING_READ(PIPECONF(pipe));
}
6002
 
3480 Serge 6003
/*
 * Set up the pipe CSC unit.
 *
 * Currently only full range RGB to limited range RGB conversion
 * is supported, but eventually this should handle various
 * RGB<->YCbCr scenarios as well.
 */
static void intel_set_pipe_csc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint16_t coeff = 0x7800; /* 1.0 */

	/*
	 * TODO: Check what kind of values actually come out of the pipe
	 * with these coeff/postoff values and adjust to get the best
	 * accuracy. Perhaps we even need to take the bpc value into
	 * consideration.
	 */

	/* Scale to compress 0-255 into the 16-235 range: (235-16)/255. */
	if (intel_crtc->config.limited_color_range)
		coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */

	/*
	 * GY/GU and RY/RU should be the other way around according
	 * to BSpec, but reality doesn't agree. Just set them up in
	 * a way that results in the correct picture.
	 */
	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);

	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);

	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);

	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);

	if (INTEL_INFO(dev)->gen > 6) {
		uint16_t postoff = 0;

		/* Offset output up to the limited-range black level (16). */
		if (intel_crtc->config.limited_color_range)
			postoff = (16 * (1 << 12) / 255) & 0x1fff;

		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);

		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
	} else {
		uint32_t mode = CSC_MODE_YUV_TO_RGB;

		if (intel_crtc->config.limited_color_range)
			mode |= CSC_BLACK_SCREEN_OFFSET;

		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
	}
}
6066
 
4104 Serge 6067
/*
 * Program PIPECONF (and on Broadwell, PIPEMISC) for a Haswell-class
 * pipe: dithering, interlace mode, legacy 8-bit gamma, and — BDW only —
 * the per-pipe bpc/dither fields that moved into PIPEMISC.
 */
static void haswell_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
	uint32_t val;

	val = 0;

	/* On HSW dithering is still controlled through PIPECONF. */
	if (IS_HASWELL(dev) && intel_crtc->config.dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	I915_WRITE(PIPECONF(cpu_transcoder), val);
	POSTING_READ(PIPECONF(cpu_transcoder));

	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
	POSTING_READ(GAMMA_MODE(intel_crtc->pipe));

	/* On BDW the bpc/dither controls live in PIPEMISC. */
	if (IS_BROADWELL(dev)) {
		val = 0;

		switch (intel_crtc->config.pipe_bpp) {
		case 18:
			val |= PIPEMISC_DITHER_6_BPC;
			break;
		case 24:
			val |= PIPEMISC_DITHER_8_BPC;
			break;
		case 30:
			val |= PIPEMISC_DITHER_10_BPC;
			break;
		case 36:
			val |= PIPEMISC_DITHER_12_BPC;
			break;
		default:
			/* Case prevented by pipe_config_set_bpp. */
			BUG();
		}

		if (intel_crtc->config.dither)
			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

		I915_WRITE(PIPEMISC(pipe), val);
	}
}
6119
 
3031 serge 6120
/*
 * Compute the DPLL divisors for an Ironlake CRTC.
 *
 * Fills @clock (and, when an LVDS downclock is available and matches,
 * @reduced_clock with *@has_reduced_clock set) for the target port
 * clock.  Returns false if no divisor set can be found.
 */
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
				    intel_clock_t *clock,
				    bool *has_reduced_clock,
				    intel_clock_t *reduced_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	int refclk;
	const intel_limit_t *limit;
	bool ret, is_lvds = false;

	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		switch (intel_encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		}
	}

	refclk = ironlake_get_refclk(crtc);

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or FALSE.  The returned values represent the clock equation:
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc, refclk);
	ret = dev_priv->display.find_dpll(limit, crtc,
					  to_intel_crtc(crtc)->config.port_clock,
					  refclk, NULL, clock);
	if (!ret)
		return false;

	if (is_lvds && dev_priv->lvds_downclock_avail) {
		/*
		 * Ensure we match the reduced clock's P to the target clock.
		 * If the clocks don't match, we can't switch the display clock
		 * by using the FP0/FP1. In such case we will disable the LVDS
		 * downclock feature.
		*/
		*has_reduced_clock =
			dev_priv->display.find_dpll(limit, crtc,
						    dev_priv->lvds_downclock,
						    refclk, clock,
						    reduced_clock);
	}

	return true;
}
6170
 
3243 Serge 6171
/*
 * Return the number of FDI lanes required to carry @target_clock (kHz)
 * at @bpp bits per pixel over a link running at @link_bw (kHz per lane,
 * 8 bits per symbol).
 *
 * The target bandwidth is padded by 5% to account for spread spectrum:
 * max center spread is 2.5%, doubled for safety's sake.
 */
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/* 21/20 == +5% spread-spectrum headroom. */
	uint32_t bandwidth = target_clock * bpp * 21 / 20;
	uint32_t lane_capacity = link_bw * 8;

	return bandwidth / lane_capacity + 1;
}
6181
 
4104 Serge 6182
/* True when the effective M divisor is small relative to N (M < factor*N),
 * in which case the PLL's coarse-bits tuning (FP_CB_TUNE) should be set. */
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}
6186
 
3243 Serge 6187
/*
 * Assemble the DPLL control register value for an Ironlake CRTC.
 *
 * Also ORs FP_CB_TUNE into *@fp (and *@fp2 when a reduced clock is in
 * use) when the divisor ratio calls for coarse-bits tuning.  @fp2 may
 * be NULL; when non-NULL, @reduced_clock must be valid.
 *
 * Returns the DPLL value with DPLL_VCO_ENABLE set; the caller writes it
 * to the hardware.
 */
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				      u32 *fp,
				      intel_clock_t *reduced_clock, u32 *fp2)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	uint32_t dpll;
	int factor, num_connectors = 0;
	bool is_lvds = false, is_sdvo = false;

	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		switch (intel_encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			break;
		}

		num_connectors++;
	}

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (is_lvds) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (intel_crtc->config.sdvo_tv_clock)
		factor = 20;

	if (ironlake_needs_fb_cb_tune(&intel_crtc->config.dpll, factor))
		*fp |= FP_CB_TUNE;

	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
		*fp2 |= FP_CB_TUNE;

	dpll = 0;

	if (is_lvds)
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (intel_crtc->config.pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;
	if (intel_crtc->config.has_dp_encoder)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (intel_crtc->config.dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* SSC reference only when LVDS is the sole output on the pipe. */
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	return dpll | DPLL_VCO_ENABLE;
}
6271
 
6272
/*
 * Mode-set a pipe on Ironlake-class (PCH-based) hardware.
 *
 * Computes the pixel clock dividers, claims a shared PCH DPLL when a PCH
 * encoder is present (CPU eDP drives its own clock and needs none),
 * programs pipe timings and FDI M/N values, then points the primary plane
 * at @fb at offset (@x, @y).
 *
 * Returns 0 on success, -EINVAL when no PLL settings fit the mode or no
 * shared DPLL is available.
 */
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
				  int x, int y,
				  struct drm_framebuffer *fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	int num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	u32 dpll = 0, fp = 0, fp2 = 0;
	bool ok, has_reduced_clock = false;
	bool is_lvds = false;
	struct intel_encoder *encoder;
	struct intel_shared_dpll *pll;
	int ret;

	/* Scan the encoders on this crtc: LVDS enables the low-frequency
	 * (downclocked) path below. */
	for_each_encoder_on_crtc(dev, crtc, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		}

		num_connectors++;
	}

	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));

	ok = ironlake_compute_clocks(crtc, &clock,
				     &has_reduced_clock, &reduced_clock);
	if (!ok && !intel_crtc->config.clock_set) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}
	/* Compat-code for transition, will disappear. */
	if (!intel_crtc->config.clock_set) {
		intel_crtc->config.dpll.n = clock.n;
		intel_crtc->config.dpll.m1 = clock.m1;
		intel_crtc->config.dpll.m2 = clock.m2;
		intel_crtc->config.dpll.p1 = clock.p1;
		intel_crtc->config.dpll.p2 = clock.p2;
	}

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (intel_crtc->config.has_pch_encoder) {
		fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
	if (has_reduced_clock)
			fp2 = i9xx_dpll_compute_fp(&reduced_clock);

		dpll = ironlake_compute_dpll(intel_crtc,
					     &fp, &reduced_clock,
				     has_reduced_clock ? &fp2 : NULL);

		/* Stash the desired register values; the shared-DPLL code
		 * writes them to hardware when the PLL is enabled. */
		intel_crtc->config.dpll_hw_state.dpll = dpll;
		intel_crtc->config.dpll_hw_state.fp0 = fp;
		if (has_reduced_clock)
			intel_crtc->config.dpll_hw_state.fp1 = fp2;
		else
			intel_crtc->config.dpll_hw_state.fp1 = fp;

		pll = intel_get_shared_dpll(intel_crtc);
		if (pll == NULL) {
			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
					 pipe_name(pipe));
			return -EINVAL;
        }
	} else
		intel_put_shared_dpll(intel_crtc);

	if (intel_crtc->config.has_dp_encoder)
		intel_dp_set_m_n(intel_crtc);

	/* Downclocking is only usable on LVDS with a valid reduced clock and
	 * the powersave module option enabled. */
	if (is_lvds && has_reduced_clock && i915_powersave)
		intel_crtc->lowfreq_avail = true;
	else
		intel_crtc->lowfreq_avail = false;

	intel_set_pipe_timings(intel_crtc);

	if (intel_crtc->config.has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
					     &intel_crtc->config.fdi_m_n);
	}

	ironlake_set_pipeconf(crtc);

	/* Set up the display plane register */
	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
	POSTING_READ(DSPCNTR(plane));

	ret = intel_pipe_set_base(crtc, x, y, fb);

	return ret;
}
3243 Serge 6369
 
4560 Serge 6370
/*
 * Read back the link/data M-N values of the PCH transcoder attached to
 * @crtc's pipe into @m_n. The TU (transfer unit) size shares the DATA_M1
 * register with gmch_m: it lives in the bits covered by TU_SIZE_MASK and
 * is stored as (size - 1) in hardware, hence the masking and the +1.
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
6385
 
6386
/*
 * Read back the link/data M-N values of a CPU transcoder into @m_n.
 *
 * Gen5+ parts index the registers by @transcoder; older (G4X-style) parts
 * only index by pipe, so @pipe is used there and @transcoder is ignored.
 * As above, the TU size shares the DATA_M register and is stored minus one.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
					& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
				   & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	} else {
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
6412
 
4560 Serge 6413
void intel_dp_get_m_n(struct intel_crtc *crtc,
6414
		      struct intel_crtc_config *pipe_config)
6415
{
6416
	if (crtc->config.has_pch_encoder)
6417
		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
6418
	else
6419
		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
6420
					     &pipe_config->dp_m_n);
6421
}
6422
 
6423
/* Read back the FDI link M/N configuration of @crtc's CPU transcoder
 * into @pipe_config->fdi_m_n (thin wrapper for readability at call sites). */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_config *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n);
}
6429
 
4104 Serge 6430
/*
 * Read back the PCH panel fitter state for @crtc into
 * @pipe_config->pch_pfit (enabled flag, window position and size).
 * On gen7 it additionally sanity-checks that the fitter is wired to the
 * expected pipe, since fitter<->pipe assignments are not reshuffled there.
 */
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
				     struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	tmp = I915_READ(PF_CTL(crtc->pipe));

	if (tmp & PF_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));

		/* We currently do not free assignements of panel fitters on
		 * ivb/hsw (since we don't use the higher upscaling modes which
		 * differentiates them) so just WARN about this case for now. */
		if (IS_GEN7(dev)) {
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
				PF_PIPE_SEL_IVB(crtc->pipe));
		}
	}
}
6453
 
3746 Serge 6454
/*
 * Read the current hardware state of @crtc's pipe into @pipe_config
 * (Ironlake/PCH platforms). Returns false if the pipe is disabled,
 * true with @pipe_config filled in otherwise.
 *
 * Recovers: pipe bpp from PIPECONF, PCH encoder presence and FDI lane
 * count, which shared DPLL drives the transcoder, the DPLL's hardware
 * state and pixel multiplier, pipe timings and panel-fitter state.
 */
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	/* Decode bits-per-color from PIPECONF into total pipe bpp. */
	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;

		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);

		if (HAS_PCH_IBX(dev_priv->dev)) {
			/* IBX has a fixed pipe <-> PLL mapping. */
			pipe_config->shared_dpll =
				(enum intel_dpll_id) crtc->pipe;
		} else {
			/* CPT selects the PLL per transcoder via PCH_DPLL_SEL. */
			tmp = I915_READ(PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
			else
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
		}

		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];

		WARN_ON(!pll->get_hw_state(dev_priv, pll,
					   &pipe_config->dpll_hw_state));

		tmp = pipe_config->dpll_hw_state.dpll;
		/* Multiplier is stored minus one in the DPLL register. */
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ironlake_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);

	ironlake_get_pfit_config(crtc, pipe_config);

	return true;
}
6528
 
4104 Serge 6529
/*
 * Sanity-check (via WARNs, nothing is enforced) that every consumer of the
 * LCPLL clock is already shut down before the PLL itself is disabled:
 * no active CRTCs, power well off, DDI PLLs unreferenced, panel power and
 * all backlight PWMs off, and display interrupts masked. Called from
 * hsw_disable_lcpll() only.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
	struct intel_crtc *crtc;
	unsigned long irqflags;
	uint32_t val;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
		WARN(crtc->active, "CRTC for pipe %c enabled\n",
		     pipe_name(crtc->pipe));

	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
	WARN(plls->spll_refcount, "SPLL enabled\n");
	WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n");
	WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n");
	WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
	WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
	     "CPU PWM1 enabled\n");
	WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
	     "CPU PWM2 enabled\n");
	WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
	     "PCH PWM1 enabled\n");
	WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
	     "Utility pin enabled\n");
	WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/* Check under the irq lock that all display interrupts except the
	 * ones we deliberately keep (DE_PCH_EVENT_IVB / PCH hotplug) are
	 * masked off. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	val = I915_READ(DEIMR);
	WARN((val | DE_PCH_EVENT_IVB) != 0xffffffff,
	     "Unexpected DEIMR bits enabled: 0x%x\n", val);
	val = I915_READ(SDEIMR);
	WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff,
	     "Unexpected SDEIMR bits enabled: 0x%x\n", val);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
6565
 
6566
/*
6567
 * This function implements pieces of two sequences from BSpec:
6568
 * - Sequence for display software to disable LCPLL
6569
 * - Sequence for display software to allow package C8+
6570
 * The steps implemented here are just the steps that actually touch the LCPLL
6571
 * register. Callers should take care of disabling all the display engine
6572
 * functions, doing the mode unset, fixing interrupts, etc.
6573
 */
4560 Serge 6574
/*
 * Disable the LCPLL, optionally switching the CD clock to FCLK first
 * (@switch_to_fclk) and optionally permitting the PLL to power down
 * (@allow_power_down). Step order follows the BSpec sequence (see the
 * comment above); do not reorder the register writes.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
		       bool switch_to_fclk, bool allow_power_down)
{
	uint32_t val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
				       LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		val = I915_READ(LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	/* D_COMP is written through the pcode mailbox, under the rps lock. */
	val = I915_READ(D_COMP);
	val |= D_COMP_COMP_DISABLE;
	mutex_lock(&dev_priv->rps.hw_lock);
	if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
		DRM_ERROR("Failed to disable D_COMP\n");
	mutex_unlock(&dev_priv->rps.hw_lock);
	POSTING_READ(D_COMP);
	/* KolibriOS: delay() stands in for the kernel's ndelay(100) here. */
    delay(1);

	if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}
6620
 
6621
/*
6622
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
6623
 * source.
6624
 */
4560 Serge 6625
/*
 * Re-enable the LCPLL after hsw_disable_lcpll(): disallow power-down,
 * re-enable D_COMP via the pcode mailbox, clear the PLL-disable bit, wait
 * for lock, and switch the CD clock back from FCLK. Returns early if the
 * PLL is already fully up. Forcewake is held across the sequence so the
 * register accesses don't hit a powered-down GT.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	val = I915_READ(LCPLL_CTL);

	/* Already locked, enabled and on the LCPLL source: nothing to do. */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/* Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine! */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	val = I915_READ(D_COMP);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	mutex_lock(&dev_priv->rps.hw_lock);
	if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
		DRM_ERROR("Failed to enable D_COMP\n");
	mutex_unlock(&dev_priv->rps.hw_lock);
	POSTING_READ(D_COMP);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
		DRM_ERROR("LCPLL not locked yet\n");

	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
					LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
6673
 
6674
/*
 * Delayed-work handler that actually enters package C8: clears the LP
 * clock-gating override on LPT-LP, disables the DP clock output and the
 * display interrupts, shuts down the LCPLL and drops the runtime-PM
 * reference. Scheduled from __hsw_enable_package_c8() after the timeout.
 */
void hsw_enable_pc8_work(struct work_struct *__work)
{
	struct drm_i915_private *dev_priv =
		container_of(to_delayed_work(__work), struct drm_i915_private,
			     pc8.enable_work);
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	WARN_ON(!HAS_PC8(dev));

	/* A racing disable may have run before the work fired. */
	if (dev_priv->pc8.enabled)
		return;

	DRM_DEBUG_KMS("Enabling package C8+\n");

	dev_priv->pc8.enabled = true;

	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev);
	hsw_pc8_disable_interrupts(dev);
	hsw_disable_lcpll(dev_priv, true, true);

	intel_runtime_pm_put(dev_priv);
}
6703
 
6704
/*
 * Drop one PC8 disable reference; when the count reaches zero, schedule
 * the delayed work that actually enters package C8. Caller must hold
 * dev_priv->pc8.lock.
 */
static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv)
{
	WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
	WARN(dev_priv->pc8.disable_count < 1,
	     "pc8.disable_count: %d\n", dev_priv->pc8.disable_count);

	dev_priv->pc8.disable_count--;
	if (dev_priv->pc8.disable_count != 0)
		return;

	/* Enter PC8 lazily: the work gives other code a window to bump the
	 * refcount again before we pay the entry cost. */
	schedule_delayed_work(&dev_priv->pc8.enable_work,
			      msecs_to_jiffies(i915_pc8_timeout));
}
6717
 
6718
/*
 * Take one PC8 disable reference; on the 0->1 transition, leave package
 * C8: cancel any pending enable work and, if PC8 was actually entered,
 * restore the LCPLL, interrupts, PCH reference clock, clock gating, DDI
 * buffers, swizzling and ring frequencies. Caller must hold
 * dev_priv->pc8.lock.
 */
static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
	WARN(dev_priv->pc8.disable_count < 0,
	     "pc8.disable_count: %d\n", dev_priv->pc8.disable_count);

	dev_priv->pc8.disable_count++;
	if (dev_priv->pc8.disable_count != 1)
		return;

	WARN_ON(!HAS_PC8(dev));

	cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
	/* The enable work may have been cancelled before it ran; then there
	 * is nothing to undo. */
	if (!dev_priv->pc8.enabled)
		return;

	DRM_DEBUG_KMS("Disabling package C8+\n");

	intel_runtime_pm_get(dev_priv);

	hsw_restore_lcpll(dev_priv);
	hsw_pc8_restore_interrupts(dev);
	lpt_init_pch_refclk(dev);

	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	intel_prepare_ddi(dev);
	i915_gem_init_swizzling(dev);
	mutex_lock(&dev_priv->rps.hw_lock);
	gen6_update_ring_freq(dev);
	mutex_unlock(&dev_priv->rps.hw_lock);
	dev_priv->pc8.enabled = false;
}
6758
 
6759
/* Locked public wrapper around __hsw_enable_package_c8(); no-op on
 * hardware without PC8 support. */
void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
{
	if (!HAS_PC8(dev_priv->dev))
		return;

	mutex_lock(&dev_priv->pc8.lock);
	__hsw_enable_package_c8(dev_priv);
	mutex_unlock(&dev_priv->pc8.lock);
}
6768
 
6769
/* Locked public wrapper around __hsw_disable_package_c8(); no-op on
 * hardware without PC8 support. */
void hsw_disable_package_c8(struct drm_i915_private *dev_priv)
{
	if (!HAS_PC8(dev_priv->dev))
		return;

	mutex_lock(&dev_priv->pc8.lock);
	__hsw_disable_package_c8(dev_priv);
	mutex_unlock(&dev_priv->pc8.lock);
}
6778
 
6779
/*
 * Check whether package C8 may be entered: no CRTC enabled and the
 * driver's power well request register clear. Returns true when PC8 is
 * allowed.
 */
static bool hsw_can_enable_package_c8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;
	uint32_t val;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
		if (crtc->base.enabled)
			return false;

	/* This case is still possible since we have the i915.disable_power_well
	 * parameter and also the KVMr or something else might be requesting the
	 * power well. */
	val = I915_READ(HSW_PWR_WELL_DRIVER);
	if (val != 0) {
		DRM_DEBUG_KMS("Not enabling PC8: power well on\n");
		return false;
	}

	return true;
}
6800
 
6801
/* Since we're called from modeset_global_resources there's no way to
6802
 * symmetrically increase and decrease the refcount, so we use
6803
 * dev_priv->pc8.requirements_met to track whether we already have the refcount
6804
 * or not.
6805
 */
6806
/*
 * Re-evaluate the PC8 requirements after a modeset and flip the
 * requirements_met refcount contribution accordingly (see the comment
 * above: modeset_global_resources can't take/release the reference
 * symmetrically, so the flag remembers whether we hold it).
 */
static void hsw_update_package_c8(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool allow;

	if (!HAS_PC8(dev_priv->dev))
		return;

	/* Respect the i915.enable_pc8 module parameter. */
	if (!i915_enable_pc8)
		return;

	mutex_lock(&dev_priv->pc8.lock);

	allow = hsw_can_enable_package_c8(dev_priv);

	if (allow == dev_priv->pc8.requirements_met)
		goto done;

	dev_priv->pc8.requirements_met = allow;

	if (allow)
		__hsw_enable_package_c8(dev_priv);
	else
		__hsw_disable_package_c8(dev_priv);

done:
	mutex_unlock(&dev_priv->pc8.lock);
}
6834
 
6835
/* GPU went idle: drop the PC8 disable reference held on its behalf
 * (tracked by pc8.gpu_idle so the transition happens exactly once). */
static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
{
	if (!HAS_PC8(dev_priv->dev))
		return;

	mutex_lock(&dev_priv->pc8.lock);
	if (!dev_priv->pc8.gpu_idle) {
		dev_priv->pc8.gpu_idle = true;
		__hsw_enable_package_c8(dev_priv);
	}
	mutex_unlock(&dev_priv->pc8.lock);
}
6847
 
6848
/* GPU became busy: take a PC8 disable reference on its behalf (inverse
 * of hsw_package_c8_gpu_idle()). */
static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
{
	if (!HAS_PC8(dev_priv->dev))
		return;

	mutex_lock(&dev_priv->pc8.lock);
	if (dev_priv->pc8.gpu_idle) {
		dev_priv->pc8.gpu_idle = false;
		__hsw_disable_package_c8(dev_priv);
	}
	mutex_unlock(&dev_priv->pc8.lock);
}
6860
 
4560 Serge 6861
/* Iterate @domain over every power domain whose bit is set in @mask. */
#define for_each_power_domain(domain, mask)				\
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
		if ((1 << (domain)) & (mask))
6864
 
6865
/*
 * Build the bitmask of power domains a pipe needs: the pipe's own domain,
 * the domain of the CPU transcoder it maps to, and - when the panel
 * fitter is in use - the pipe's panel-fitter domain.
 */
static unsigned long get_pipe_power_domains(struct drm_device *dev,
					    enum pipe pipe, bool pfit_enabled)
{
	enum transcoder cpu_transcoder =
		intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
	unsigned long mask = BIT(POWER_DOMAIN_PIPE(pipe)) |
			     BIT(POWER_DOMAIN_TRANSCODER(cpu_transcoder));

	if (pfit_enabled)
		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	return mask;
}
6880
 
6881
/*
 * Grab or release the POWER_DOMAIN_INIT reference used during driver
 * init/teardown. Idempotent: tracks the current state in
 * power_domains.init_power_on so repeated calls with the same @enable
 * are no-ops.
 */
void intel_display_set_init_power(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->power_domains.init_power_on == enable)
		return;

	if (enable)
		intel_display_power_get(dev, POWER_DOMAIN_INIT);
	else
		intel_display_power_put(dev, POWER_DOMAIN_INIT);

	dev_priv->power_domains.init_power_on = enable;
}
6895
 
6896
/*
 * After a modeset, re-balance display power-domain references: acquire
 * every domain the now-enabled pipes need, then release the domains each
 * crtc held before, and finally drop the init reference. The get-before-
 * put ordering avoids needlessly power-cycling wells that stay in use.
 */
static void modeset_update_power_wells(struct drm_device *dev)
{
	unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
	struct intel_crtc *crtc;

	/*
	 * First get all needed power domains, then put all unneeded, to avoid
	 * any unnecessary toggling of the power wells.
	 */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		enum intel_display_power_domain domain;

		if (!crtc->base.enabled)
			continue;

		pipe_domains[crtc->pipe] = get_pipe_power_domains(dev,
						crtc->pipe,
						crtc->config.pch_pfit.enabled);

		for_each_power_domain(domain, pipe_domains[crtc->pipe])
			intel_display_power_get(dev, domain);
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		enum intel_display_power_domain domain;

		/* Release the references taken for the previous configuration
		 * and remember the new set for next time. */
		for_each_power_domain(domain, crtc->enabled_power_domains)
			intel_display_power_put(dev, domain);

		crtc->enabled_power_domains = pipe_domains[crtc->pipe];
	}

	intel_display_set_init_power(dev, false);
}
6930
 
6931
/* Haswell hook run once per global modeset: re-balance power wells, then
 * re-evaluate whether package C8 may be entered. */
static void haswell_modeset_global_resources(struct drm_device *dev)
{
	modeset_update_power_wells(dev);
	hsw_update_package_c8(dev);
}
6936
 
3243 Serge 6937
/*
 * Mode-set a pipe on Haswell: select and enable a DDI PLL, program DP M/N
 * values if needed, pipe timings, FDI M/N for PCH encoders, pipeconf and
 * the pipe CSC, then point the primary plane at @fb at (@x, @y).
 * Returns 0 on success or -EINVAL if no DDI PLL can be selected.
 */
static int haswell_crtc_mode_set(struct drm_crtc *crtc,
				 int x, int y,
				 struct drm_framebuffer *fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane;
	int ret;

	if (!intel_ddi_pll_select(intel_crtc))
		return -EINVAL;
	intel_ddi_pll_enable(intel_crtc);

	if (intel_crtc->config.has_dp_encoder)
		intel_dp_set_m_n(intel_crtc);

	/* No LVDS downclocking on this platform. */
	intel_crtc->lowfreq_avail = false;

	intel_set_pipe_timings(intel_crtc);

	if (intel_crtc->config.has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
					     &intel_crtc->config.fdi_m_n);
	}

	haswell_set_pipeconf(crtc);

	intel_set_pipe_csc(crtc);

	/* Set up the display plane register */
	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
    POSTING_READ(DSPCNTR(plane));

	ret = intel_pipe_set_base(crtc, x, y, fb);

    return ret;
}
6975
 
3746 Serge 6976
/*
 * Read the current hardware state of @crtc's pipe into @pipe_config on
 * Haswell. Returns false if the pipe (or the power domain needed to read
 * it) is off, true with @pipe_config filled in otherwise.
 *
 * Handles the eDP transcoder special case (a pipe may be routed through
 * TRANSCODER_EDP), PCH/FDI state via the single PCH transcoder A wired to
 * DDI E, pipe timings, panel fitter (power-domain permitting) and IPS.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain pfit_domain;
	uint32_t tmp;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	/* If the eDP transcoder is active, work out which pipe it serves. */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
		enum pipe trans_edp_pipe;
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to edp transcoder\n");
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_edp_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_edp_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_edp_pipe = PIPE_C;
			break;
		}

		if (trans_edp_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
	}

	if (!intel_display_power_enabled(dev,
			POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	/*
	 * Haswell has only FDI/PCH transcoder A. It is which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
	if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
	    I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}

	intel_get_pipe_timings(crtc, pipe_config);

	/* Only touch the panel fitter registers if their domain is powered. */
	pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	if (intel_display_power_enabled(dev, pfit_domain))
		ironlake_get_pfit_config(crtc, pipe_config);

	if (IS_HASWELL(dev))
	pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
				   (I915_READ(IPS_CTL) & IPS_ENABLE);

	pipe_config->pixel_multiplier = 1;

	return true;
}
7048
 
2330 Serge 7049
/*
 * Generic crtc mode-set entry point: brackets the platform-specific
 * ->crtc_mode_set() hook with vblank pre/post-modeset bookkeeping and, on
 * success, runs every attached encoder's ->mode_set() hook. Returns the
 * hook's error code, or 0 on success.
 */
static int intel_crtc_mode_set(struct drm_crtc *crtc,
			       int x, int y,
			       struct drm_framebuffer *fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
	int pipe = intel_crtc->pipe;
	int ret;

	drm_vblank_pre_modeset(dev, pipe);

	ret = dev_priv->display.crtc_mode_set(crtc, x, y, fb);

	drm_vblank_post_modeset(dev, pipe);

	if (ret != 0)
	return ret;

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
			encoder->base.base.id,
			drm_get_encoder_name(&encoder->base),
			mode->base.id, mode->name);
			encoder->mode_set(encoder);
	}

	return 0;
}
2327 Serge 7080
 
4560 Serge 7081
/* Map of HDMI pixel clocks (kHz) to AUD_CONFIG_PIXEL_CLOCK_HDMI_* field
 * values; looked up by audio_config_hdmi_pixel_clock(). Entry [1]
 * (25200) doubles as the fallback when no exact match exists. */
static struct {
	int clock;
	u32 config;
} hdmi_audio_clock[] = {
	{ DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
	{ 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
	{ 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
	{ 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
	{ 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
	{ 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
	{ DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
	{ 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
	{ DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
	{ 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
};
7096
 
7097
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
7098
static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
7099
{
7100
	int i;
7101
 
7102
	for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
7103
		if (mode->clock == hdmi_audio_clock[i].clock)
7104
			break;
7105
	}
7106
 
7107
	if (i == ARRAY_SIZE(hdmi_audio_clock)) {
7108
		DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
7109
		i = 1;
7110
	}
7111
 
7112
	DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
7113
		      hdmi_audio_clock[i].clock,
7114
		      hdmi_audio_clock[i].config);
7115
 
7116
	return hdmi_audio_clock[i].config;
7117
}
7118
 
2342 Serge 7119
/*
 * Check whether the ELD (EDID-Like Data) already programmed into the
 * audio hardware matches @connector->eld, so a rewrite can be skipped.
 *
 * @reg_eldv/@bits_eldv: register/bits holding the ELD-valid flag.
 * @reg_elda/@bits_elda: register/bits of the ELD read address, cleared
 *                       here to rewind the auto-incrementing read pointer.
 * @reg_edid:            data register the ELD is read back through.
 *
 * Returns true when an empty local ELD matches a cleared valid bit, or
 * when the valid bit is set and every dword compares equal; note that
 * eld[2] holds the ELD length in dwords per the ELD layout.
 */
static bool intel_eld_uptodate(struct drm_connector *connector,
			       int reg_eldv, uint32_t bits_eldv,
			       int reg_elda, uint32_t bits_elda,
			       int reg_edid)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t i;

	i = I915_READ(reg_eldv);
	i &= bits_eldv;

	/* No local ELD: up to date iff the hardware valid bit is clear. */
	if (!eld[0])
		return !i;

	if (!i)
		return false;

	/* Rewind the hardware ELD read pointer before comparing. */
	i = I915_READ(reg_elda);
	i &= ~bits_elda;
	I915_WRITE(reg_elda, i);

	for (i = 0; i < eld[2]; i++)
		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
			return false;

	return true;
}
7147
 
7148
/*
 * Program the connector's ELD into the G4X audio hardware: pick the
 * ELD-valid bit for the detected audio device, skip the write if the
 * hardware copy is already current, otherwise clear the valid bit and
 * read pointer, stream the ELD dwords in, and set the valid bit again.
 * An empty ELD (eld[0] == 0) just leaves the valid bit cleared.
 */
static void g4x_write_eld(struct drm_connector *connector,
			  struct drm_crtc *crtc,
			  struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t len;
	uint32_t i;

	i = I915_READ(G4X_AUD_VID_DID);

	if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
		eldv = G4X_ELDV_DEVCL_DEVBLC;
	else
		eldv = G4X_ELDV_DEVCTG;

	if (intel_eld_uptodate(connector,
			       G4X_AUD_CNTL_ST, eldv,
			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
			       G4X_HDMIW_HDMIEDID))
		return;

	/* Invalidate the old ELD and rewind the write pointer. */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i &= ~(eldv | G4X_ELD_ADDR);
	len = (i >> 9) & 0x1f;		/* ELD buffer size */
	I915_WRITE(G4X_AUD_CNTL_ST, i);

	if (!eld[0])
		return;

	/* eld[2] is the ELD length in dwords; clamp to the buffer size. */
	len = min_t(uint8_t, eld[2], len);
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));

	i = I915_READ(G4X_AUD_CNTL_ST);
	i |= eldv;
	I915_WRITE(G4X_AUD_CNTL_ST, i);
}
7188
 
3031 serge 7189
/*
 * Program the connector's ELD on Haswell-class hardware.
 *
 * Enables the per-pipe audio output and ELD-valid bits, selects HDMI or
 * DisplayPort N-value handling in the audio config register, then writes
 * the ELD through the per-pipe EDID data window (skipped when the hardware
 * copy is already current).
 */
static void haswell_write_eld(struct drm_connector *connector,
			      struct drm_crtc *crtc,
			      struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t eldv;
	uint32_t i;
	int len;
	int pipe = to_intel_crtc(crtc)->pipe;
	int tmp;

	/* Per-pipe audio register block on HSW. */
	int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
	int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
	int aud_config = HSW_AUD_CFG(pipe);
	int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;

	DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n");

	/* Audio output enable */
	DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
	tmp = I915_READ(aud_cntrl_st2);
	tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));	/* 4 bits per pipe */
	I915_WRITE(aud_cntrl_st2, tmp);

	/* Wait for 1 vertical blank */
	intel_wait_for_vblank(dev, pipe);

	/* Set ELD valid state */
	tmp = I915_READ(aud_cntrl_st2);
	DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
	tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
	I915_WRITE(aud_cntrl_st2, tmp);
	tmp = I915_READ(aud_cntrl_st2);
	DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);

	/* Enable HDMI mode */
	tmp = I915_READ(aud_config);
	DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
	/* clear N_programing_enable and N_value_index */
	tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
	I915_WRITE(aud_config, tmp);

	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));

	eldv = AUDIO_ELD_VALID_A << (pipe * 4);
	intel_crtc->eld_vld = true;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
	} else {
		I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
	}

	/* Skip the rewrite if the hardware ELD already matches. */
	if (intel_eld_uptodate(connector,
			       aud_cntrl_st2, eldv,
			       aud_cntl_st, IBX_ELD_ADDRESS,
			       hdmiw_hdmiedid))
		return;

	/* Invalidate the stale ELD before rewriting it. */
	i = I915_READ(aud_cntrl_st2);
	i &= ~eldv;
	I915_WRITE(aud_cntrl_st2, i);

	if (!eld[0])
		return;

	/* Rewind the ELD write address to 0. */
	i = I915_READ(aud_cntl_st);
	i &= ~IBX_ELD_ADDRESS;
	I915_WRITE(aud_cntl_st, i);
	i = (i >> 29) & DIP_PORT_SEL_MASK;		/* DIP_Port_Select, 0x1 = PortB */
	DRM_DEBUG_DRIVER("port num:%d\n", i);

	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));

	/* Mark the new ELD valid. */
	i = I915_READ(aud_cntrl_st2);
	i |= eldv;
	I915_WRITE(aud_cntrl_st2, i);

}
7277
 
2342 Serge 7278
/*
 * Program the connector's ELD on PCH (IBX/CPT) and Valleyview platforms.
 *
 * Selects the per-platform audio register block, determines which digital
 * port the audio is routed to (from the DIP port select field, or directly
 * from the dig port on VLV), then writes the ELD through the EDID data
 * window unless the hardware copy is already up to date.
 */
static void ironlake_write_eld(struct drm_connector *connector,
			       struct drm_crtc *crtc,
			       struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t i;	/* reused: port number, then loop index */
	int len;
	int hdmiw_hdmiedid;
	int aud_config;
	int aud_cntl_st;
	int aud_cntrl_st2;
	int pipe = to_intel_crtc(crtc)->pipe;

	/* Pick the register block matching the PCH/platform flavour. */
	if (HAS_PCH_IBX(connector->dev)) {
		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
		aud_config = IBX_AUD_CFG(pipe);
		aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
	} else if (IS_VALLEYVIEW(connector->dev)) {
		hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
		aud_config = VLV_AUD_CFG(pipe);
		aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
		aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
	} else {
		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
		aud_config = CPT_AUD_CFG(pipe);
		aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
	}

	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));

	/* Find the digital port driving the audio. */
	if (IS_VALLEYVIEW(connector->dev))  {
		struct intel_encoder *intel_encoder;
		struct intel_digital_port *intel_dig_port;

		intel_encoder = intel_attached_encoder(connector);
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		i = intel_dig_port->port;
	} else {
		i = I915_READ(aud_cntl_st);
		i = (i >> 29) & DIP_PORT_SEL_MASK;
		/* DIP_Port_Select, 0x1 = PortB */
	}

	if (!i) {
		DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
		/* operate blindly on all ports */
		eldv = IBX_ELD_VALIDB;
		eldv |= IBX_ELD_VALIDB << 4;
		eldv |= IBX_ELD_VALIDB << 8;
	} else {
		DRM_DEBUG_DRIVER("ELD on port %c\n", port_name(i));
		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);	/* 4 bits per port */
	}

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
	} else {
		I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
	}

	/* Skip the rewrite if the hardware ELD already matches. */
	if (intel_eld_uptodate(connector,
			       aud_cntrl_st2, eldv,
			       aud_cntl_st, IBX_ELD_ADDRESS,
			       hdmiw_hdmiedid))
		return;

	/* Invalidate the stale ELD before rewriting it. */
	i = I915_READ(aud_cntrl_st2);
	i &= ~eldv;
	I915_WRITE(aud_cntrl_st2, i);

	if (!eld[0])
		return;

	/* Rewind the ELD write address to 0. */
	i = I915_READ(aud_cntl_st);
	i &= ~IBX_ELD_ADDRESS;
	I915_WRITE(aud_cntl_st, i);

	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));

	/* Mark the new ELD valid. */
	i = I915_READ(aud_cntrl_st2);
	i |= eldv;
	I915_WRITE(aud_cntrl_st2, i);
}
7370
 
7371
void intel_write_eld(struct drm_encoder *encoder,
7372
		     struct drm_display_mode *mode)
7373
{
7374
	struct drm_crtc *crtc = encoder->crtc;
7375
	struct drm_connector *connector;
7376
	struct drm_device *dev = encoder->dev;
7377
	struct drm_i915_private *dev_priv = dev->dev_private;
7378
 
7379
	connector = drm_select_eld(encoder, mode);
7380
	if (!connector)
7381
		return;
7382
 
7383
	DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
7384
			 connector->base.id,
7385
			 drm_get_connector_name(connector),
7386
			 connector->encoder->base.id,
7387
			 drm_get_encoder_name(connector->encoder));
7388
 
7389
	connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
7390
 
7391
	if (dev_priv->display.write_eld)
4560 Serge 7392
		dev_priv->display.write_eld(connector, crtc, mode);
2342 Serge 7393
}
7394
 
3031 serge 7395
/*
 * Show or hide the hardware cursor on i845/i865-class hardware.
 *
 * @base: GTT/phys address of the cursor image; 0 means "hide".
 * These chipsets only allow updating the cursor base while the cursor is
 * disabled, hence the base write happens before enabling.
 */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool visible = base != 0;
	u32 cntl;

	/* Visibility unchanged: nothing to reprogram. */
	if (intel_crtc->cursor_visible == visible)
		return;

	cntl = I915_READ(_CURACNTR);
	if (visible) {
		/* On these chipsets we can only modify the base whilst
		 * the cursor is disabled.
		 */
		I915_WRITE(_CURABASE, base);

		cntl &= ~(CURSOR_FORMAT_MASK);
		/* XXX width must be 64, stride 256 => 0x00 << 28 */
		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB;
	} else
		cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
	I915_WRITE(_CURACNTR, cntl);

	intel_crtc->cursor_visible = visible;
}
2327 Serge 7424
 
3031 serge 7425
/*
 * Show or hide the hardware cursor on i9xx-class hardware.
 *
 * @base: address of the 64x64 ARGB cursor image; 0 means "hide".
 * The control register is only touched when visibility changes; the base
 * register is always rewritten and latches on the next vblank.
 */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool visible = base != 0;

	if (intel_crtc->cursor_visible != visible) {
		uint32_t cntl = I915_READ(CURCNTR(pipe));
		if (base) {
			cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
			cntl |= pipe << 28; /* Connect to correct pipe */
		} else {
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
			cntl |= CURSOR_MODE_DISABLE;
		}
		I915_WRITE(CURCNTR(pipe), cntl);

		intel_crtc->cursor_visible = visible;
	}
	/* and commit changes on next vblank */
	POSTING_READ(CURCNTR(pipe));
	I915_WRITE(CURBASE(pipe), base);
	POSTING_READ(CURBASE(pipe));
}
2327 Serge 7452
 
3031 serge 7453
/*
 * Show or hide the hardware cursor on Ivybridge and newer hardware.
 *
 * @base: address of the 64x64 ARGB cursor image; 0 means "hide".
 * On HSW/BDW the pipe CSC must be enabled and trickle feed disabled in
 * the cursor control register. The base write latches on the next vblank.
 */
static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool visible = base != 0;

	if (intel_crtc->cursor_visible != visible) {
		uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
		if (base) {
			cntl &= ~CURSOR_MODE;
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
		} else {
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
			cntl |= CURSOR_MODE_DISABLE;
		}
		if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
			cntl |= CURSOR_PIPE_CSC_ENABLE;
			cntl &= ~CURSOR_TRICKLE_FEED_DISABLE;
		}
		I915_WRITE(CURCNTR_IVB(pipe), cntl);

		intel_crtc->cursor_visible = visible;
	}
	/* and commit changes on next vblank */
	POSTING_READ(CURCNTR_IVB(pipe));
	I915_WRITE(CURBASE_IVB(pipe), base);
	POSTING_READ(CURBASE_IVB(pipe));
}
2327 Serge 7483
 
3031 serge 7484
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
/*
 * Recompute the cursor position register value and hand it, together with
 * the (possibly zeroed) base address, to the per-generation cursor update
 * helper.
 *
 * @on: false forces the cursor hidden regardless of position.
 * The cursor is also forced off (base = 0) whenever it lies entirely
 * outside the pipe source area, since an off-screen cursor can hang the
 * GPU (see comment above). Negative coordinates are encoded with the
 * per-axis sign bit plus magnitude.
 */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int x = intel_crtc->cursor_x;
	int y = intel_crtc->cursor_y;
	u32 base = 0, pos = 0;
	bool visible;

	if (on)
		base = intel_crtc->cursor_addr;

	/* Fully off the right/bottom edge: hide. */
	if (x >= intel_crtc->config.pipe_src_w)
		base = 0;

	if (y >= intel_crtc->config.pipe_src_h)
		base = 0;

	if (x < 0) {
		/* Fully off the left edge: hide. */
		if (x + intel_crtc->cursor_width <= 0)
			base = 0;

		/* Negative coordinates use sign bit + magnitude. */
		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= x << CURSOR_X_SHIFT;

	if (y < 0) {
		/* Fully off the top edge: hide. */
		if (y + intel_crtc->cursor_height <= 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= y << CURSOR_Y_SHIFT;

	visible = base != 0;
	/* Hidden and already hidden: no register traffic needed. */
	if (!visible && !intel_crtc->cursor_visible)
		return;

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		I915_WRITE(CURPOS_IVB(pipe), pos);
		ivb_update_cursor(crtc, base);
	} else {
		I915_WRITE(CURPOS(pipe), pos);
		i9xx_update_cursor(crtc, base);
	}
}
2327 Serge 7536
 
4557 Serge 7537
#if 0
3031 serge 7538
static int intel_crtc_cursor_set(struct drm_crtc *crtc,
7539
				 struct drm_file *file,
7540
				 uint32_t handle,
7541
				 uint32_t width, uint32_t height)
7542
{
7543
	struct drm_device *dev = crtc->dev;
7544
	struct drm_i915_private *dev_priv = dev->dev_private;
7545
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7546
	struct drm_i915_gem_object *obj;
7547
	uint32_t addr;
7548
	int ret;
2327 Serge 7549
 
3031 serge 7550
	/* if we want to turn off the cursor ignore width and height */
7551
	if (!handle) {
7552
		DRM_DEBUG_KMS("cursor off\n");
7553
		addr = 0;
7554
		obj = NULL;
7555
		mutex_lock(&dev->struct_mutex);
7556
		goto finish;
7557
	}
2327 Serge 7558
 
3031 serge 7559
	/* Currently we only support 64x64 cursors */
7560
	if (width != 64 || height != 64) {
7561
		DRM_ERROR("we currently only support 64x64 cursors\n");
7562
		return -EINVAL;
7563
	}
2327 Serge 7564
 
3031 serge 7565
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
7566
	if (&obj->base == NULL)
7567
		return -ENOENT;
2327 Serge 7568
 
3031 serge 7569
	if (obj->base.size < width * height * 4) {
7570
		DRM_ERROR("buffer is to small\n");
7571
		ret = -ENOMEM;
7572
		goto fail;
7573
	}
2327 Serge 7574
 
3031 serge 7575
	/* we only need to pin inside GTT if cursor is non-phy */
7576
	mutex_lock(&dev->struct_mutex);
7577
	if (!dev_priv->info->cursor_needs_physical) {
3746 Serge 7578
		unsigned alignment;
7579
 
3031 serge 7580
		if (obj->tiling_mode) {
7581
			DRM_ERROR("cursor cannot be tiled\n");
7582
			ret = -EINVAL;
7583
			goto fail_locked;
7584
		}
2327 Serge 7585
 
3746 Serge 7586
		/* Note that the w/a also requires 2 PTE of padding following
7587
		 * the bo. We currently fill all unused PTE with the shadow
7588
		 * page and so we should always have valid PTE following the
7589
		 * cursor preventing the VT-d warning.
7590
		 */
7591
		alignment = 0;
7592
		if (need_vtd_wa(dev))
7593
			alignment = 64*1024;
7594
 
7595
		ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
3031 serge 7596
		if (ret) {
7597
			DRM_ERROR("failed to move cursor bo into the GTT\n");
7598
			goto fail_locked;
7599
		}
2327 Serge 7600
 
3031 serge 7601
		ret = i915_gem_object_put_fence(obj);
7602
		if (ret) {
7603
			DRM_ERROR("failed to release fence for cursor");
7604
			goto fail_unpin;
7605
		}
2327 Serge 7606
 
4104 Serge 7607
		addr = i915_gem_obj_ggtt_offset(obj);
3031 serge 7608
	} else {
7609
		int align = IS_I830(dev) ? 16 * 1024 : 256;
7610
		ret = i915_gem_attach_phys_object(dev, obj,
7611
						  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
7612
						  align);
7613
		if (ret) {
7614
			DRM_ERROR("failed to attach phys object\n");
7615
			goto fail_locked;
7616
		}
7617
		addr = obj->phys_obj->handle->busaddr;
7618
	}
2327 Serge 7619
 
3031 serge 7620
	if (IS_GEN2(dev))
7621
		I915_WRITE(CURSIZE, (height << 12) | width);
2327 Serge 7622
 
3031 serge 7623
 finish:
7624
	if (intel_crtc->cursor_bo) {
7625
		if (dev_priv->info->cursor_needs_physical) {
7626
			if (intel_crtc->cursor_bo != obj)
7627
				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
7628
		} else
4104 Serge 7629
			i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
3031 serge 7630
		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
7631
	}
2327 Serge 7632
 
3031 serge 7633
	mutex_unlock(&dev->struct_mutex);
2327 Serge 7634
 
3031 serge 7635
	intel_crtc->cursor_addr = addr;
7636
	intel_crtc->cursor_bo = obj;
7637
	intel_crtc->cursor_width = width;
7638
	intel_crtc->cursor_height = height;
2327 Serge 7639
 
4104 Serge 7640
	if (intel_crtc->active)
7641
		intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
2327 Serge 7642
 
3031 serge 7643
	return 0;
7644
fail_unpin:
4104 Serge 7645
	i915_gem_object_unpin_from_display_plane(obj);
3031 serge 7646
fail_locked:
7647
	mutex_unlock(&dev->struct_mutex);
7648
fail:
7649
	drm_gem_object_unreference_unlocked(&obj->base);
7650
	return ret;
7651
}
4557 Serge 7652
#endif
2327 Serge 7653
 
3031 serge 7654
static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
7655
{
7656
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7657
 
4560 Serge 7658
	intel_crtc->cursor_x = clamp_t(int, x, SHRT_MIN, SHRT_MAX);
7659
	intel_crtc->cursor_y = clamp_t(int, y, SHRT_MIN, SHRT_MAX);
3031 serge 7660
 
4104 Serge 7661
	if (intel_crtc->active)
7662
		intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
3031 serge 7663
 
7664
	return 0;
7665
}
7666
 
2330 Serge 7667
/*
 * drm gamma_set hook: copy the high byte of each 16-bit channel value
 * into the crtc's 256-entry software LUT (indices [start, start+size),
 * clipped to 256) and reload the hardware LUT.
 */
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				 u16 *blue, uint32_t start, uint32_t size)
{
	struct intel_crtc *icrtc = to_intel_crtc(crtc);
	int i;
	int end = (start + size > 256) ? 256 : start + size;

	for (i = start; i < end; i++) {
		/* Hardware LUT entries are 8 bits per channel. */
		icrtc->lut_r[i] = red[i] >> 8;
		icrtc->lut_g[i] = green[i] >> 8;
		icrtc->lut_b[i] = blue[i] >> 8;
	}

	intel_crtc_load_lut(crtc);
}
2327 Serge 7681
 
2330 Serge 7682
/* VESA 640x480x72Hz mode to set on the pipe */
/* Fallback mode used by intel_get_load_detect_pipe() when the caller
 * does not supply one. */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
2327 Serge 7687
 
4560 Serge 7688
struct drm_framebuffer *
3031 serge 7689
intel_framebuffer_create(struct drm_device *dev,
7690
			 struct drm_mode_fb_cmd2 *mode_cmd,
7691
			 struct drm_i915_gem_object *obj)
7692
{
7693
	struct intel_framebuffer *intel_fb;
7694
	int ret;
2327 Serge 7695
 
3031 serge 7696
	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
7697
	if (!intel_fb) {
7698
		drm_gem_object_unreference_unlocked(&obj->base);
7699
		return ERR_PTR(-ENOMEM);
7700
	}
2327 Serge 7701
 
4560 Serge 7702
	ret = i915_mutex_lock_interruptible(dev);
7703
	if (ret)
7704
		goto err;
7705
 
3031 serge 7706
	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
4560 Serge 7707
	mutex_unlock(&dev->struct_mutex);
7708
	if (ret)
7709
		goto err;
7710
 
7711
	return &intel_fb->base;
7712
err:
3031 serge 7713
		drm_gem_object_unreference_unlocked(&obj->base);
7714
		kfree(intel_fb);
4560 Serge 7715
 
3031 serge 7716
		return ERR_PTR(ret);
7717
}
2327 Serge 7718
 
2330 Serge 7719
static u32
7720
intel_framebuffer_pitch_for_width(int width, int bpp)
7721
{
7722
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
7723
	return ALIGN(pitch, 64);
7724
}
2327 Serge 7725
 
2330 Serge 7726
static u32
7727
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
7728
{
7729
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
7730
	return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
7731
}
2327 Serge 7732
 
2330 Serge 7733
static struct drm_framebuffer *
7734
intel_framebuffer_create_for_mode(struct drm_device *dev,
7735
				  struct drm_display_mode *mode,
7736
				  int depth, int bpp)
7737
{
7738
	struct drm_i915_gem_object *obj;
3243 Serge 7739
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2327 Serge 7740
 
4104 Serge 7741
	return NULL;
2330 Serge 7742
}
2327 Serge 7743
 
2330 Serge 7744
/*
 * Try to reuse the fbdev framebuffer for load detection.
 *
 * Returns the fbdev fb if it exists, is backed by a GEM object, and is
 * both wide enough (pitch) and large enough (total size) for @mode;
 * otherwise NULL. Always NULL when fbdev support is compiled out.
 */
static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
		   struct drm_display_mode *mode)
{
#ifdef CONFIG_DRM_I915_FBDEV
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

	if (dev_priv->fbdev == NULL)
		return NULL;

	obj = dev_priv->fbdev->ifb.obj;
	if (obj == NULL)
		return NULL;

	fb = &dev_priv->fbdev->ifb.base;
	/* The fbdev pitch must cover a full scanline of the mode. */
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
							       fb->bits_per_pixel))
		return NULL;

	/* And the backing object must cover every visible line. */
	if (obj->base.size < mode->vdisplay * fb->pitches[0])
		return NULL;

	return fb;
#else
	return NULL;
#endif
}
2327 Serge 7773
 
3031 serge 7774
/*
 * Acquire a pipe and framebuffer to drive @connector for load detection.
 *
 * On success returns true with crtc->mutex held; the caller must pair
 * this with intel_release_load_detect_pipe(), which performs the unlock.
 * @old records what must be undone: the previous dpms mode, whether a
 * temporary modeset was done, and any temporary framebuffer to destroy.
 * Returns false (with no lock held) when no pipe or fb is available or
 * the modeset fails.
 */
bool intel_get_load_detect_pipe(struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *fb;
	int i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		/* Held until intel_release_load_detect_pipe(). */
		mutex_lock(&crtc->mutex);

		old->dpms_mode = connector->dpms;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (connector->dpms != DRM_MODE_DPMS_ON)
			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);

		return true;
	}

	/* Find an unused one (if possible) */
	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (!possible_crtc->enabled) {
			crtc = possible_crtc;
			break;
		}
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		return false;
	}

	/* Held until intel_release_load_detect_pipe(). */
	mutex_lock(&crtc->mutex);
	intel_encoder->new_crtc = to_intel_crtc(crtc);
	to_intel_connector(connector)->new_encoder = intel_encoder;

	intel_crtc = to_intel_crtc(crtc);
	old->dpms_mode = connector->dpms;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	if (!mode)
		mode = &load_detect_mode;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	fb = mode_fits_in_fbdev(dev, mode);
	if (fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
		old->release_fb = fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		mutex_unlock(&crtc->mutex);
		return false;
	}

	if (intel_set_mode(crtc, mode, 0, 0, fb)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		/* Destroy the temporary fb we created above, if any. */
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		mutex_unlock(&crtc->mutex);
		return false;
	}

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return true;
}
2327 Serge 7881
 
3031 serge 7882
/*
 * Undo intel_get_load_detect_pipe(): tear down the temporary modeset (or
 * restore the previous dpms state) and release crtc->mutex, which the
 * get() call left held.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = encoder->crtc;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	/* A temporary modeset was done: detach and disable the pipe. */
	if (old->load_detect_temp) {
		to_intel_connector(connector)->new_encoder = NULL;
		intel_encoder->new_crtc = NULL;
		intel_set_mode(crtc, NULL, 0, 0, NULL);

		/* Drop the temporary load-detect framebuffer, if one was made. */
		if (old->release_fb) {
			drm_framebuffer_unregister_private(old->release_fb);
			drm_framebuffer_unreference(old->release_fb);
		}

		mutex_unlock(&crtc->mutex);
		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON)
		connector->funcs->dpms(connector, old->dpms_mode);

	mutex_unlock(&crtc->mutex);
}
2327 Serge 7914
 
4560 Serge 7915
static int i9xx_pll_refclk(struct drm_device *dev,
7916
			   const struct intel_crtc_config *pipe_config)
7917
{
7918
	struct drm_i915_private *dev_priv = dev->dev_private;
7919
	u32 dpll = pipe_config->dpll_hw_state.dpll;
7920
 
7921
	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
7922
		return dev_priv->vbt.lvds_ssc_freq;
7923
	else if (HAS_PCH_SPLIT(dev))
7924
		return 120000;
7925
	else if (!IS_GEN2(dev))
7926
		return 96000;
7927
	else
7928
		return 48000;
7929
}
7930
 
2330 Serge 7931
/* Returns the clock of the currently programmed mode of the given pipe. */
/*
 * Decode the DPLL/FP register state captured in @pipe_config back into
 * divider values and compute the resulting port clock.
 *
 * Reads nothing from the DPLL registers themselves; it works from
 * pipe_config->dpll_hw_state (dpll, fp0, fp1). On gen2 the LVDS register
 * is read to pick the correct P dividers. Leaves port_clock untouched
 * when the DPLL mode field is unrecognized.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	intel_clock_t clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* FP0 or FP1 is in use depending on the rate-select bit. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		/* Pineview encodes N as a one-hot field. */
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* P1 is stored one-hot; ffs() recovers the divider. */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev))
			pineview_clock(refclk, &clock);
		else
			i9xx_clock(refclk, &clock);
	} else {
		/* Gen2: P dividers depend on whether LVDS drives pipe B. */
		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		i9xx_clock(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = clock.dot;
}
8019
 
4560 Serge 8020
int intel_dotclock_calculate(int link_freq,
8021
			     const struct intel_link_m_n *m_n)
4104 Serge 8022
{
8023
	/*
8024
	 * The calculation for the data clock is:
4560 Serge 8025
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
4104 Serge 8026
	 * But we want to avoid losing precison if possible, so:
4560 Serge 8027
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
4104 Serge 8028
	 *
8029
	 * and the link clock is simpler:
4560 Serge 8030
	 * link_clock = (m * link_clock) / n
2330 Serge 8031
	 */
2327 Serge 8032
 
4560 Serge 8033
	if (!m_n->link_n)
8034
		return 0;
4104 Serge 8035
 
4560 Serge 8036
	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
8037
}
4104 Serge 8038
 
4560 Serge 8039
/*
 * Fill in the clock fields of @pipe_config for a PCH-driven pipe:
 * port_clock comes from decoding the DPLL state, and the pipe's
 * crtc_clock is derived from the FDI link frequency and M/N values.
 */
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * This value does not include pixel_multiplier.
	 * We will check that port_clock and adjusted_mode.crtc_clock
	 * agree once we know their relationship in the encoder's
	 * get_config() function.
	 */
	pipe_config->adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
					 &pipe_config->fdi_m_n);
}
2327 Serge 8057
 
2330 Serge 8058
/** Returns the currently programmed mode of the given pipe. */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
	struct drm_display_mode *mode;
	struct intel_crtc_config pipe_config;
	/* Raw timing registers: low 16 bits = active/start - 1, high 16 bits
	 * = total/end - 1 (hence the "+ 1" when decoding below). */
	int htot = I915_READ(HTOTAL(cpu_transcoder));
	int hsync = I915_READ(HSYNC(cpu_transcoder));
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
	int vsync = I915_READ(VSYNC(cpu_transcoder));
	enum pipe pipe = intel_crtc->pipe;

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	/*
	 * Construct a pipe_config sufficient for getting the clock info
	 * back out of crtc_clock_get.
	 *
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
	 * to use a real value here instead.
	 */
	pipe_config.cpu_transcoder = (enum transcoder) pipe;
	pipe_config.pixel_multiplier = 1;
	/* Seed the dpll state from the live registers so the clock
	 * readback below decodes what is actually programmed. */
	pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
	pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
	pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
	i9xx_crtc_clock_get(intel_crtc, &pipe_config);

	mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);

	/* Caller owns the returned mode and must free it (drm_mode_destroy). */
	return mode;
}
8105
 
2327 Serge 8106
/*
 * intel_increase_pllclock - restore the full LVDS pixel clock.
 *
 * If the panel is currently running at the reduced (downclocked) refresh
 * rate, switch the DPLL back to the full-rate divisors so screen updates
 * appear at normal speed. No-op on PCH-split platforms and when no
 * LVDS downclock is available.
 */
static void intel_increase_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int dpll_reg = DPLL(pipe);
	int dpll;

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	dpll = I915_READ(dpll_reg);
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
		DRM_DEBUG_DRIVER("upclocking LVDS\n");

		/* DPLL rate select is only writable while the panel is unlocked. */
		assert_panel_unlocked(dev_priv, pipe);

		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);

		/* Read back to verify the rate-select bit actually cleared. */
		dpll = I915_READ(dpll_reg);
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
	}
}
8136
 
3031 serge 8137
/*
 * intel_decrease_pllclock - drop LVDS to the reduced refresh rate.
 *
 * Switches the DPLL to the FPA1 (downclock) divisors when the pipe was
 * configured with a low-frequency alternative, saving power while the
 * screen is idle. No-op on PCH-split platforms and when no LVDS
 * downclock is available.
 */
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	/*
	 * Since this is called by a timer, we should never get here in
	 * the manual case.
	 */
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
		int pipe = intel_crtc->pipe;
		int dpll_reg = DPLL(pipe);
		int dpll;

		DRM_DEBUG_DRIVER("downclocking LVDS\n");

		/* DPLL rate select is only writable while the panel is unlocked. */
		assert_panel_unlocked(dev_priv, pipe);

		dpll = I915_READ(dpll_reg);
		dpll |= DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);
		/* Read back to verify the rate-select bit actually stuck. */
		dpll = I915_READ(dpll_reg);
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
	}

}
2327 Serge 8172
 
3031 serge 8173
/*
 * intel_mark_busy - notify power-management code that the GPU is in use.
 *
 * Bumps the Haswell package C8 busy reference and refreshes the
 * GPU frequency/energy accounting.
 */
void intel_mark_busy(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	hsw_package_c8_gpu_busy(dev_priv);
	i915_update_gfx_val(dev_priv);
}
2327 Serge 8180
 
3031 serge 8181
void intel_mark_idle(struct drm_device *dev)
8182
{
4104 Serge 8183
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 8184
	struct drm_crtc *crtc;
2327 Serge 8185
 
4104 Serge 8186
	hsw_package_c8_gpu_idle(dev_priv);
8187
 
3031 serge 8188
	if (!i915_powersave)
8189
		return;
2327 Serge 8190
 
3031 serge 8191
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8192
		if (!crtc->fb)
8193
			continue;
2327 Serge 8194
 
3480 Serge 8195
		intel_decrease_pllclock(crtc);
3031 serge 8196
	}
4560 Serge 8197
 
8198
	if (dev_priv->info->gen >= 6)
8199
		gen6_rps_idle(dev->dev_private);
3031 serge 8200
}
2327 Serge 8201
 
4104 Serge 8202
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
8203
			struct intel_ring_buffer *ring)
3031 serge 8204
{
8205
	struct drm_device *dev = obj->base.dev;
8206
	struct drm_crtc *crtc;
2327 Serge 8207
 
3031 serge 8208
	if (!i915_powersave)
8209
		return;
2327 Serge 8210
 
3031 serge 8211
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8212
		if (!crtc->fb)
8213
			continue;
2327 Serge 8214
 
4104 Serge 8215
		if (to_intel_framebuffer(crtc->fb)->obj != obj)
8216
			continue;
8217
 
3480 Serge 8218
			intel_increase_pllclock(crtc);
4104 Serge 8219
		if (ring && intel_fbc_enabled(dev))
8220
			ring->fbc_dirty = true;
3031 serge 8221
	}
8222
}
2327 Serge 8223
 
2330 Serge 8224
/*
 * intel_crtc_destroy - tear down a crtc and free its software state.
 *
 * Detaches any pending page-flip work under the event lock, cancels and
 * frees it, then releases the DRM crtc and the intel_crtc itself.
 */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_unpin_work *work;
	unsigned long flags;

	/* Take ownership of any in-flight unpin work under the event lock
	 * so the interrupt path can no longer see it. */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (work) {
		/* Wait for a possibly-running work item before freeing it. */
		cancel_work_sync(&work->work);
		kfree(work);
	}

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}
2327 Serge 8245
 
3031 serge 8246
#if 0
8247
/*
 * intel_unpin_work_fn - deferred completion of a page flip.
 *
 * Runs from the driver workqueue after the flip completed: unpins the
 * old framebuffer, drops both object references taken at flip-queue
 * time, re-evaluates FBC and decrements the crtc's unpin_work_count.
 */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_unpin_work *work =
		container_of(__work, struct intel_unpin_work, work);
	struct drm_device *dev = work->crtc->dev;

	mutex_lock(&dev->struct_mutex);
	intel_unpin_fb_obj(work->old_fb_obj);
	drm_gem_object_unreference(&work->pending_flip_obj->base);
	drm_gem_object_unreference(&work->old_fb_obj->base);

	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	/* Counter was incremented when this work was queued; it must
	 * still be positive here. */
	BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
	atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);

	kfree(work);
}
2327 Serge 8266
 
3031 serge 8267
/*
 * do_intel_finish_page_flip - vblank-irq side completion of a page flip.
 *
 * Called from the flip-done/vblank interrupt: validates that the pending
 * flip really reached the INTEL_FLIP_COMPLETE state (guarded by memory
 * barriers against the queuing path), sends the userspace vblank event,
 * and hands the unpin work off to the workqueue.
 */
static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	/* Ensure we don't miss a work->pending update ... */
	smp_rmb();

	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* and that the unpin work is consistent wrt ->pending. */
	smp_rmb();

	intel_crtc->unpin_work = NULL;

	if (work->event)
		drm_send_vblank_event(dev, intel_crtc->pipe, work->event);

	drm_vblank_put(dev, intel_crtc->pipe);

	spin_unlock_irqrestore(&dev->event_lock, flags);

	/* Unblock anyone waiting for this flip in intel_crtc_wait_for_flip. */
	wake_up_all(&dev_priv->pending_flip_queue);

	queue_work(dev_priv->wq, &work->work);

	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
}
2327 Serge 8308
 
3031 serge 8309
/* Complete a page flip reported by pipe number (pipe -> crtc lookup). */
void intel_finish_page_flip(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];

	do_intel_finish_page_flip(dev, crtc);
}
2327 Serge 8316
 
3031 serge 8317
/* Complete a page flip reported by plane number (plane -> crtc lookup). */
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];

	do_intel_finish_page_flip(dev, crtc);
}
2327 Serge 8324
 
3031 serge 8325
/*
 * intel_prepare_page_flip - note that a queued flip has hit the hardware.
 *
 * Advances the pending-flip state machine from PENDING to COMPLETE for
 * the given plane, so the next vblank irq can finish the flip.
 */
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
	unsigned long flags;

	/* NB: An MMIO update of the plane base pointer will also
	 * generate a page-flip completion irq, i.e. every modeset
	 * is also accompanied by a spurious intel_prepare_page_flip().
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	if (intel_crtc->unpin_work)
		/* inc_not_zero: a flip still in the not-yet-pending state
		 * (pending == 0) must not be advanced by a spurious irq. */
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
2327 Serge 8341
 
3243 Serge 8342
inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
8343
{
8344
	/* Ensure that the work item is consistent when activating it ... */
8345
	smp_wmb();
8346
	atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
8347
	/* and that it is marked active as soon as the irq could fire. */
8348
	smp_wmb();
8349
}
8350
 
3031 serge 8351
/*
 * intel_gen2_queue_flip - queue an MI_DISPLAY_FLIP on gen2 hardware.
 *
 * Pins and fences @obj, then emits a wait-for-previous-flip event
 * followed by the flip command on the render ring.
 * Returns 0 on success or a negative error code (obj unpinned on error).
 */
static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		goto err_unpin;

	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	intel_ring_emit(ring, 0); /* aux display base address, unused */

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
2327 Serge 8395
 
3031 serge 8396
/*
 * intel_gen3_queue_flip - queue an MI_DISPLAY_FLIP on gen3 hardware.
 *
 * Same flow as gen2 but uses the I915 variant of the flip opcode.
 * Returns 0 on success or a negative error code (obj unpinned on error).
 */
static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		goto err_unpin;

	/* Serialize against a still-pending flip on this plane. */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	intel_ring_emit(ring, MI_NOOP);

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
2327 Serge 8437
 
3031 serge 8438
/*
 * intel_gen4_queue_flip - queue an MI_DISPLAY_FLIP on gen4 (i965) hardware.
 *
 * Returns 0 on success or a negative error code (obj unpinned on error).
 */
static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		goto err_unpin;

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring,
			(i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset) |
			obj->tiling_mode);

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
2327 Serge 8486
 
3031 serge 8487
/*
 * intel_gen6_queue_flip - queue an MI_DISPLAY_FLIP on gen6 hardware.
 *
 * Returns 0 on success or a negative error code (obj unpinned on error).
 */
static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		goto err_unpin;

	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);

	/* Contrary to the suggestions in the documentation,
	 * "Enable Panel Fitter" does not seem to be required when page
	 * flipping with a non-native mode, and worse causes a normal
	 * modeset to fail.
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
2327 Serge 8531
 
3031 serge 8532
/*
 * intel_gen7_queue_flip - queue an MI_DISPLAY_FLIP on gen7 (IVB+) hardware.
 *
 * Prefers the ring the object was last rendered on (falling back to the
 * blitter on VLV or for non-render rings), picks the plane select bits,
 * and on the render ring additionally programs DERRMR so the flip-done
 * message is delivered. Returns 0 on success or a negative error code
 * (obj unpinned on error).
 */
static int intel_gen7_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_ring_buffer *ring;
	uint32_t plane_bit = 0;
	int len, ret;

	ring = obj->ring;
	if (IS_VALLEYVIEW(dev) || ring == NULL || ring->id != RCS)
		ring = &dev_priv->ring[BCS];

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	switch(intel_crtc->plane) {
	case PLANE_A:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
		break;
	case PLANE_B:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
		break;
	case PLANE_C:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
		break;
	default:
		WARN_ONCE(1, "unknown plane in flip command\n");
		ret = -ENODEV;
		goto err_unpin;
	}

	/* 4 dwords for the flip itself, plus 6 for the DERRMR dance on RCS. */
	len = 4;
	if (ring->id == RCS)
		len += 6;

	ret = intel_ring_begin(ring, len);
	if (ret)
		goto err_unpin;

	/* Unmask the flip-done completion message. Note that the bspec says that
	 * we should do this for both the BCS and RCS, and that we must not unmask
	 * more than one flip event at any time (or ensure that one flip message
	 * can be sent by waiting for flip-done prior to queueing new flips).
	 * Experimentation says that BCS works despite DERRMR masking all
	 * flip-done completion events and that unmasking all planes at once
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
	 */
	if (ring->id == RCS) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, DERRMR);
		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
					DERRMR_PIPEB_PRI_FLIP_DONE |
					DERRMR_PIPEC_PRI_FLIP_DONE));
		intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
				MI_SRM_LRM_GLOBAL_GTT);
		intel_ring_emit(ring, DERRMR);
		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
	}

	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	intel_ring_emit(ring, (MI_NOOP));

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
2327 Serge 8611
 
3031 serge 8612
/* Fallback flip hook for platforms without a queue_flip implementation:
 * always reject the flip. */
static int intel_default_queue_flip(struct drm_device *dev,
				    struct drm_crtc *crtc,
				    struct drm_framebuffer *fb,
				    struct drm_i915_gem_object *obj,
				    uint32_t flags)
{
	return -ENODEV;
}
2327 Serge 8620
 
3031 serge 8621
/*
 * intel_crtc_page_flip - queue an asynchronous page flip on @crtc.
 * @crtc: crtc to flip
 * @fb: new framebuffer to display
 * @event: optional vblank event to deliver on completion
 * @page_flip_flags: flags from the ioctl, forwarded to the queue_flip hook
 *
 * Validates that the flip is expressible as an MI display flip, allocates
 * the unpin work, installs it under the event lock (only one flip may be
 * pending per crtc), references the old/new objects and hands the flip to
 * the per-generation queue_flip implementation. On failure all state is
 * unwound. Returns 0 or a negative error code (-EBUSY if a flip is
 * already pending).
 */
static int intel_crtc_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *old_fb = crtc->fb;
	struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	unsigned long flags;
	int ret;

	/* Can't change pixel format via MI display flips. */
	if (fb->pixel_format != crtc->fb->pixel_format)
		return -EINVAL;

	/*
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
	 * Note that pitch changes could also affect these register.
	 */
	if (INTEL_INFO(dev)->gen > 3 &&
	    (fb->offsets[0] != crtc->fb->offsets[0] ||
	     fb->pitches[0] != crtc->fb->pitches[0]))
		return -EINVAL;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	work->event = event;
	work->crtc = crtc;
	work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
	INIT_WORK(&work->work, intel_unpin_work_fn);

	ret = drm_vblank_get(dev, intel_crtc->pipe);
	if (ret)
		goto free_work;

	/* We borrow the event spin lock for protecting unpin_work */
	spin_lock_irqsave(&dev->event_lock, flags);
	if (intel_crtc->unpin_work) {
		spin_unlock_irqrestore(&dev->event_lock, flags);
		kfree(work);
		drm_vblank_put(dev, intel_crtc->pipe);

		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		return -EBUSY;
	}
	intel_crtc->unpin_work = work;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/* Throttle: don't let unpin work pile up more than two deep. */
	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
		flush_workqueue(dev_priv->wq);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto cleanup;

	/* Reference the objects for the scheduled work. */
	drm_gem_object_reference(&work->old_fb_obj->base);
	drm_gem_object_reference(&obj->base);

	crtc->fb = fb;

	work->pending_flip_obj = obj;

	work->enable_stall_check = true;

	atomic_inc(&intel_crtc->unpin_work_count);
	/* Snapshot the reset counter so a GPU reset can cancel this flip. */
	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);

	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, page_flip_flags);
	if (ret)
		goto cleanup_pending;

	intel_disable_fbc(dev);
	intel_mark_fb_busy(obj, NULL);
	mutex_unlock(&dev->struct_mutex);

	trace_i915_flip_request(intel_crtc->plane, obj);

	return 0;

cleanup_pending:
	atomic_dec(&intel_crtc->unpin_work_count);
	crtc->fb = old_fb;
	drm_gem_object_unreference(&work->old_fb_obj->base);
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

cleanup:
	spin_lock_irqsave(&dev->event_lock, flags);
	intel_crtc->unpin_work = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	drm_vblank_put(dev, intel_crtc->pipe);
free_work:
	kfree(work);

	return ret;
}
8724
#endif
8725
 
8726
/* CRTC helper vtable: only the hooks needed here (atomic base update for
 * kgdb/panic handling and gamma LUT upload) are provided. */
static struct drm_crtc_helper_funcs intel_helper_funcs = {
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.load_lut = intel_crtc_load_lut,
};
8730
 
8731
/**
8732
 * intel_modeset_update_staged_output_state
8733
 *
8734
 * Updates the staged output configuration state, e.g. after we've read out the
8735
 * current hw state.
8736
 */
8737
static void intel_modeset_update_staged_output_state(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	/* Stage each connector's encoder from its current (real) encoder. */
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		connector->new_encoder =
			to_intel_encoder(connector->base.encoder);
	}

	/* Stage each encoder's crtc from its current (real) crtc. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		encoder->new_crtc =
			to_intel_crtc(encoder->base.crtc);
	}
}
8754
 
8755
/**
8756
 * intel_modeset_commit_output_state
8757
 *
8758
 * This function copies the stage display pipe configuration to the real one.
8759
 */
8760
static void intel_modeset_commit_output_state(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	/* Commit staged connector->encoder links to the real DRM state. */
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		connector->base.encoder = &connector->new_encoder->base;
	}

	/* Commit staged encoder->crtc links to the real DRM state. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		encoder->base.crtc = &encoder->new_crtc->base;
	}
}
8775
 
4104 Serge 8776
/*
 * connected_sink_compute_bpp - clamp pipe bpp to what the sink supports.
 * @connector: connector whose EDID-reported bpc limits are applied
 * @pipe_config: config whose pipe_bpp may be lowered (never raised)
 */
static void
connected_sink_compute_bpp(struct intel_connector * connector,
			   struct intel_crtc_config *pipe_config)
{
	int bpp = pipe_config->pipe_bpp;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
		connector->base.base.id,
		drm_get_connector_name(&connector->base));

	/* Don't use an invalid EDID bpc value */
	if (connector->base.display_info.bpc &&
	    connector->base.display_info.bpc * 3 < bpp) {
		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
			      bpp, connector->base.display_info.bpc*3);
		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
	}

	/* Clamp bpp to 8 on screens without EDID 1.4 */
	if (connector->base.display_info.bpc == 0 && bpp > 24) {
		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
			      bpp);
		pipe_config->pipe_bpp = 24;
	}
}
8801
 
3746 Serge 8802
/*
 * compute_baseline_pipe_bpp - derive the starting pipe bpp from @fb's format.
 * @crtc: crtc being configured (used to find attached connectors)
 * @fb: framebuffer whose pixel format sets the baseline bpp
 * @pipe_config: config receiving pipe_bpp (possibly clamped per sink)
 *
 * Returns the framebuffer-derived bpp (before sink clamping) or -EINVAL
 * for formats this platform cannot scan out.
 */
static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
		    struct drm_framebuffer *fb,
		    struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_connector *connector;
	int bpp;

	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		bpp = 8*3; /* since we go through a colormap */
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		/* checked in intel_framebuffer_init already */
		if (WARN_ON(INTEL_INFO(dev)->gen > 3))
			return -EINVAL;
		/* fall through: 1555 formats share the 16bpp pipe depth */
	case DRM_FORMAT_RGB565:
		bpp = 6*3; /* min is 18bpp */
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		/* checked in intel_framebuffer_init already */
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
			return -EINVAL;
		/* fall through: all 8888 formats use 24bpp pipe depth */
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		bpp = 8*3;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		/* checked in intel_framebuffer_init already */
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
			return -EINVAL;
		bpp = 10*3;
		break;
	/* TODO: gen4+ supports 16 bpc floating point, too. */
	default:
		DRM_DEBUG_KMS("unsupported depth\n");
		return -EINVAL;
	}

	pipe_config->pipe_bpp = bpp;

	/* Clamp display bpp to EDID value */
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		if (!connector->new_encoder ||
		    connector->new_encoder->new_crtc != crtc)
			continue;

		connected_sink_compute_bpp(connector, pipe_config);
	}

	/* Note: returns the pre-clamp bpp; pipe_config->pipe_bpp may be lower. */
	return bpp;
}
8861
 
4560 Serge 8862
/* Dump the crtc_* (hardware) timing fields of @mode to the KMS debug log. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
			"type: 0x%x flags: 0x%x\n",
		mode->crtc_clock,
		mode->crtc_hdisplay, mode->crtc_hsync_start,
		mode->crtc_hsync_end, mode->crtc_htotal,
		mode->crtc_vdisplay, mode->crtc_vsync_start,
		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}
8872
 
4104 Serge 8873
/*
 * intel_dump_pipe_config - dump a full pipe configuration to the debug log.
 * @crtc: crtc the config belongs to
 * @pipe_config: configuration to dump
 * @context: short label describing why the dump is made
 */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_config *pipe_config,
				   const char *context)
{
	DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
		      context, pipe_name(crtc->pipe));

	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
		      pipe_config->pipe_bpp, pipe_config->dither);
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_pch_encoder,
		      pipe_config->fdi_lanes,
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
		      pipe_config->fdi_m_n.tu);
	DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_dp_encoder,
		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
		      pipe_config->dp_m_n.tu);
	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->requested_mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
		      pipe_config->gmch_pfit.control,
		      pipe_config->gmch_pfit.pgm_ratios,
		      pipe_config->gmch_pfit.lvds_border_bits);
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
		      pipe_config->pch_pfit.pos,
		      pipe_config->pch_pfit.size,
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
}
8913
 
8914
static bool check_encoder_cloning(struct drm_crtc *crtc)
8915
{
8916
	int num_encoders = 0;
8917
	bool uncloneable_encoders = false;
8918
	struct intel_encoder *encoder;
8919
 
8920
	list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list,
8921
			    base.head) {
8922
		if (&encoder->new_crtc->base != crtc)
8923
			continue;
8924
 
8925
		num_encoders++;
8926
		if (!encoder->cloneable)
8927
			uncloneable_encoders = true;
8928
	}
8929
 
8930
	return !(num_encoders > 1 && uncloneable_encoders);
8931
}
8932
 
3746 Serge 8933
static struct intel_crtc_config *
8934
intel_modeset_pipe_config(struct drm_crtc *crtc,
8935
			  struct drm_framebuffer *fb,
3031 serge 8936
			    struct drm_display_mode *mode)
8937
{
8938
	struct drm_device *dev = crtc->dev;
8939
	struct intel_encoder *encoder;
3746 Serge 8940
	struct intel_crtc_config *pipe_config;
4104 Serge 8941
	int plane_bpp, ret = -EINVAL;
8942
	bool retry = true;
3031 serge 8943
 
4104 Serge 8944
	if (!check_encoder_cloning(crtc)) {
8945
		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
8946
		return ERR_PTR(-EINVAL);
8947
	}
8948
 
3746 Serge 8949
	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
8950
	if (!pipe_config)
3031 serge 8951
		return ERR_PTR(-ENOMEM);
8952
 
3746 Serge 8953
	drm_mode_copy(&pipe_config->adjusted_mode, mode);
8954
	drm_mode_copy(&pipe_config->requested_mode, mode);
4560 Serge 8955
 
4104 Serge 8956
	pipe_config->cpu_transcoder =
8957
		(enum transcoder) to_intel_crtc(crtc)->pipe;
8958
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
3746 Serge 8959
 
4104 Serge 8960
	/*
8961
	 * Sanitize sync polarity flags based on requested ones. If neither
8962
	 * positive or negative polarity is requested, treat this as meaning
8963
	 * negative polarity.
8964
	 */
8965
	if (!(pipe_config->adjusted_mode.flags &
8966
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
8967
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
8968
 
8969
	if (!(pipe_config->adjusted_mode.flags &
8970
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
8971
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
8972
 
8973
	/* Compute a starting value for pipe_config->pipe_bpp taking the source
8974
	 * plane pixel format and any sink constraints into account. Returns the
8975
	 * source plane bpp so that dithering can be selected on mismatches
8976
	 * after encoders and crtc also have had their say. */
8977
	plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
8978
					      fb, pipe_config);
3746 Serge 8979
	if (plane_bpp < 0)
8980
		goto fail;
8981
 
4560 Serge 8982
	/*
8983
	 * Determine the real pipe dimensions. Note that stereo modes can
8984
	 * increase the actual pipe size due to the frame doubling and
8985
	 * insertion of additional space for blanks between the frame. This
8986
	 * is stored in the crtc timings. We use the requested mode to do this
8987
	 * computation to clearly distinguish it from the adjusted mode, which
8988
	 * can be changed by the connectors in the below retry loop.
8989
	 */
8990
	drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
8991
	pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
8992
	pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;
8993
 
4104 Serge 8994
encoder_retry:
8995
	/* Ensure the port clock defaults are reset when retrying. */
8996
	pipe_config->port_clock = 0;
8997
	pipe_config->pixel_multiplier = 1;
8998
 
8999
	/* Fill in default crtc timings, allow encoders to overwrite them. */
4560 Serge 9000
	drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);
4104 Serge 9001
 
3031 serge 9002
	/* Pass our mode to the connectors and the CRTC to give them a chance to
9003
	 * adjust it according to limitations or connector properties, and also
9004
	 * a chance to reject the mode entirely.
2330 Serge 9005
	 */
3031 serge 9006
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9007
			    base.head) {
2327 Serge 9008
 
3031 serge 9009
		if (&encoder->new_crtc->base != crtc)
9010
			continue;
3746 Serge 9011
 
9012
			if (!(encoder->compute_config(encoder, pipe_config))) {
9013
				DRM_DEBUG_KMS("Encoder config failure\n");
9014
				goto fail;
9015
			}
9016
		}
9017
 
4104 Serge 9018
	/* Set default port clock if not overwritten by the encoder. Needs to be
9019
	 * done afterwards in case the encoder adjusts the mode. */
9020
	if (!pipe_config->port_clock)
4560 Serge 9021
		pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
9022
			* pipe_config->pixel_multiplier;
2327 Serge 9023
 
4104 Serge 9024
	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
9025
	if (ret < 0) {
3031 serge 9026
		DRM_DEBUG_KMS("CRTC fixup failed\n");
9027
		goto fail;
9028
	}
2327 Serge 9029
 
4104 Serge 9030
	if (ret == RETRY) {
9031
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
9032
			ret = -EINVAL;
9033
			goto fail;
9034
		}
9035
 
9036
		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
9037
		retry = false;
9038
		goto encoder_retry;
9039
	}
9040
 
3746 Serge 9041
	pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
9042
	DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
9043
		      plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);
9044
 
9045
	return pipe_config;
3031 serge 9046
fail:
3746 Serge 9047
	kfree(pipe_config);
4104 Serge 9048
	return ERR_PTR(ret);
3031 serge 9049
}
2327 Serge 9050
 
3031 serge 9051
/* Computes which crtcs are affected and sets the relevant bits in the mask. For
9052
 * simplicity we use the crtc's pipe number (because it's easier to obtain). */
9053
static void
9054
intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
9055
			     unsigned *prepare_pipes, unsigned *disable_pipes)
9056
{
9057
	struct intel_crtc *intel_crtc;
9058
	struct drm_device *dev = crtc->dev;
9059
	struct intel_encoder *encoder;
9060
	struct intel_connector *connector;
9061
	struct drm_crtc *tmp_crtc;
9062
 
9063
	*disable_pipes = *modeset_pipes = *prepare_pipes = 0;
9064
 
9065
	/* Check which crtcs have changed outputs connected to them, these need
9066
	 * to be part of the prepare_pipes mask. We don't (yet) support global
9067
	 * modeset across multiple crtcs, so modeset_pipes will only have one
9068
	 * bit set at most. */
9069
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9070
			    base.head) {
9071
		if (connector->base.encoder == &connector->new_encoder->base)
9072
			continue;
9073
 
9074
		if (connector->base.encoder) {
9075
			tmp_crtc = connector->base.encoder->crtc;
9076
 
9077
			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
9078
		}
9079
 
9080
		if (connector->new_encoder)
9081
			*prepare_pipes |=
9082
				1 << connector->new_encoder->new_crtc->pipe;
9083
	}
9084
 
9085
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9086
			    base.head) {
9087
		if (encoder->base.crtc == &encoder->new_crtc->base)
9088
			continue;
9089
 
9090
		if (encoder->base.crtc) {
9091
			tmp_crtc = encoder->base.crtc;
9092
 
9093
			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
9094
		}
9095
 
9096
		if (encoder->new_crtc)
9097
			*prepare_pipes |= 1 << encoder->new_crtc->pipe;
9098
	}
9099
 
9100
	/* Check for any pipes that will be fully disabled ... */
9101
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
9102
			    base.head) {
9103
		bool used = false;
9104
 
9105
		/* Don't try to disable disabled crtcs. */
9106
		if (!intel_crtc->base.enabled)
9107
			continue;
9108
 
9109
		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9110
				    base.head) {
9111
			if (encoder->new_crtc == intel_crtc)
9112
				used = true;
9113
		}
9114
 
9115
		if (!used)
9116
			*disable_pipes |= 1 << intel_crtc->pipe;
9117
	}
9118
 
9119
 
9120
	/* set_mode is also used to update properties on life display pipes. */
9121
	intel_crtc = to_intel_crtc(crtc);
9122
	if (crtc->enabled)
9123
		*prepare_pipes |= 1 << intel_crtc->pipe;
9124
 
3746 Serge 9125
	/*
9126
	 * For simplicity do a full modeset on any pipe where the output routing
9127
	 * changed. We could be more clever, but that would require us to be
9128
	 * more careful with calling the relevant encoder->mode_set functions.
9129
	 */
3031 serge 9130
	if (*prepare_pipes)
9131
		*modeset_pipes = *prepare_pipes;
9132
 
9133
	/* ... and mask these out. */
9134
	*modeset_pipes &= ~(*disable_pipes);
9135
	*prepare_pipes &= ~(*disable_pipes);
3746 Serge 9136
 
9137
	/*
9138
	 * HACK: We don't (yet) fully support global modesets. intel_set_config
9139
	 * obies this rule, but the modeset restore mode of
9140
	 * intel_modeset_setup_hw_state does not.
9141
	 */
9142
	*modeset_pipes &= 1 << intel_crtc->pipe;
9143
	*prepare_pipes &= 1 << intel_crtc->pipe;
4104 Serge 9144
 
9145
	DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
9146
		      *modeset_pipes, *prepare_pipes, *disable_pipes);
2330 Serge 9147
}
2327 Serge 9148
 
3031 serge 9149
static bool intel_crtc_in_use(struct drm_crtc *crtc)
2330 Serge 9150
{
3031 serge 9151
	struct drm_encoder *encoder;
2330 Serge 9152
	struct drm_device *dev = crtc->dev;
2327 Serge 9153
 
3031 serge 9154
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
9155
		if (encoder->crtc == crtc)
9156
			return true;
9157
 
9158
	return false;
9159
}
9160
 
9161
static void
9162
intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
9163
{
9164
	struct intel_encoder *intel_encoder;
9165
	struct intel_crtc *intel_crtc;
9166
	struct drm_connector *connector;
9167
 
9168
	list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
9169
			    base.head) {
9170
		if (!intel_encoder->base.crtc)
9171
			continue;
9172
 
9173
		intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
9174
 
9175
		if (prepare_pipes & (1 << intel_crtc->pipe))
9176
			intel_encoder->connectors_active = false;
9177
	}
9178
 
9179
	intel_modeset_commit_output_state(dev);
9180
 
9181
	/* Update computed state. */
9182
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
9183
			    base.head) {
9184
		intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
9185
	}
9186
 
9187
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
9188
		if (!connector->encoder || !connector->encoder->crtc)
9189
			continue;
9190
 
9191
		intel_crtc = to_intel_crtc(connector->encoder->crtc);
9192
 
9193
		if (prepare_pipes & (1 << intel_crtc->pipe)) {
9194
			struct drm_property *dpms_property =
9195
				dev->mode_config.dpms_property;
9196
 
9197
			connector->dpms = DRM_MODE_DPMS_ON;
3243 Serge 9198
			drm_object_property_set_value(&connector->base,
3031 serge 9199
							 dpms_property,
9200
							 DRM_MODE_DPMS_ON);
9201
 
9202
			intel_encoder = to_intel_encoder(connector->encoder);
9203
			intel_encoder->connectors_active = true;
9204
		}
9205
	}
9206
 
9207
}
9208
 
4560 Serge 9209
static bool intel_fuzzy_clock_check(int clock1, int clock2)
4104 Serge 9210
{
4560 Serge 9211
	int diff;
4104 Serge 9212
 
9213
	if (clock1 == clock2)
9214
		return true;
9215
 
9216
	if (!clock1 || !clock2)
9217
		return false;
9218
 
9219
	diff = abs(clock1 - clock2);
9220
 
9221
	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
9222
		return true;
9223
 
9224
	return false;
9225
}
9226
 
3031 serge 9227
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
9228
	list_for_each_entry((intel_crtc), \
9229
			    &(dev)->mode_config.crtc_list, \
9230
			    base.head) \
4104 Serge 9231
		if (mask & (1 <<(intel_crtc)->pipe))
3031 serge 9232
 
3746 Serge 9233
static bool
4104 Serge 9234
intel_pipe_config_compare(struct drm_device *dev,
9235
			  struct intel_crtc_config *current_config,
3746 Serge 9236
			  struct intel_crtc_config *pipe_config)
9237
{
4104 Serge 9238
#define PIPE_CONF_CHECK_X(name)	\
9239
	if (current_config->name != pipe_config->name) { \
9240
		DRM_ERROR("mismatch in " #name " " \
9241
			  "(expected 0x%08x, found 0x%08x)\n", \
9242
			  current_config->name, \
9243
			  pipe_config->name); \
9244
		return false; \
3746 Serge 9245
	}
9246
 
4104 Serge 9247
#define PIPE_CONF_CHECK_I(name)	\
9248
	if (current_config->name != pipe_config->name) { \
9249
		DRM_ERROR("mismatch in " #name " " \
9250
			  "(expected %i, found %i)\n", \
9251
			  current_config->name, \
9252
			  pipe_config->name); \
9253
		return false; \
9254
	}
9255
 
9256
#define PIPE_CONF_CHECK_FLAGS(name, mask)	\
9257
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
9258
		DRM_ERROR("mismatch in " #name "(" #mask ") "	   \
9259
			  "(expected %i, found %i)\n", \
9260
			  current_config->name & (mask), \
9261
			  pipe_config->name & (mask)); \
9262
		return false; \
9263
	}
9264
 
4560 Serge 9265
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
9266
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
9267
		DRM_ERROR("mismatch in " #name " " \
9268
			  "(expected %i, found %i)\n", \
9269
			  current_config->name, \
9270
			  pipe_config->name); \
9271
		return false; \
9272
	}
9273
 
4104 Serge 9274
#define PIPE_CONF_QUIRK(quirk)	\
9275
	((current_config->quirks | pipe_config->quirks) & (quirk))
9276
 
9277
	PIPE_CONF_CHECK_I(cpu_transcoder);
9278
 
9279
	PIPE_CONF_CHECK_I(has_pch_encoder);
9280
	PIPE_CONF_CHECK_I(fdi_lanes);
9281
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
9282
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
9283
	PIPE_CONF_CHECK_I(fdi_m_n.link_m);
9284
	PIPE_CONF_CHECK_I(fdi_m_n.link_n);
9285
	PIPE_CONF_CHECK_I(fdi_m_n.tu);
9286
 
4560 Serge 9287
	PIPE_CONF_CHECK_I(has_dp_encoder);
9288
	PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
9289
	PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
9290
	PIPE_CONF_CHECK_I(dp_m_n.link_m);
9291
	PIPE_CONF_CHECK_I(dp_m_n.link_n);
9292
	PIPE_CONF_CHECK_I(dp_m_n.tu);
9293
 
4104 Serge 9294
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
9295
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
9296
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
9297
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_end);
9298
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_start);
9299
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_end);
9300
 
9301
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vdisplay);
9302
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vtotal);
9303
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_start);
9304
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_end);
9305
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
9306
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
9307
 
9308
		PIPE_CONF_CHECK_I(pixel_multiplier);
9309
 
9310
	PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
9311
			      DRM_MODE_FLAG_INTERLACE);
9312
 
9313
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
9314
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
9315
				      DRM_MODE_FLAG_PHSYNC);
9316
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
9317
				      DRM_MODE_FLAG_NHSYNC);
9318
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
9319
				      DRM_MODE_FLAG_PVSYNC);
9320
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
9321
				      DRM_MODE_FLAG_NVSYNC);
9322
	}
9323
 
4560 Serge 9324
	PIPE_CONF_CHECK_I(pipe_src_w);
9325
	PIPE_CONF_CHECK_I(pipe_src_h);
4104 Serge 9326
 
9327
	PIPE_CONF_CHECK_I(gmch_pfit.control);
9328
	/* pfit ratios are autocomputed by the hw on gen4+ */
9329
	if (INTEL_INFO(dev)->gen < 4)
9330
		PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
9331
	PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
9332
	PIPE_CONF_CHECK_I(pch_pfit.enabled);
9333
	if (current_config->pch_pfit.enabled) {
9334
	PIPE_CONF_CHECK_I(pch_pfit.pos);
9335
	PIPE_CONF_CHECK_I(pch_pfit.size);
9336
	}
9337
 
4560 Serge 9338
	/* BDW+ don't expose a synchronous way to read the state */
9339
	if (IS_HASWELL(dev))
4104 Serge 9340
	PIPE_CONF_CHECK_I(ips_enabled);
9341
 
4560 Serge 9342
	PIPE_CONF_CHECK_I(double_wide);
9343
 
4104 Serge 9344
	PIPE_CONF_CHECK_I(shared_dpll);
9345
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
9346
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
9347
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
9348
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
9349
 
4280 Serge 9350
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
9351
		PIPE_CONF_CHECK_I(pipe_bpp);
9352
 
4560 Serge 9353
	if (!HAS_DDI(dev)) {
9354
		PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
9355
		PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
9356
	}
9357
 
4104 Serge 9358
#undef PIPE_CONF_CHECK_X
9359
#undef PIPE_CONF_CHECK_I
9360
#undef PIPE_CONF_CHECK_FLAGS
4560 Serge 9361
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
4104 Serge 9362
#undef PIPE_CONF_QUIRK
9363
 
3746 Serge 9364
	return true;
9365
}
9366
 
4104 Serge 9367
static void
9368
check_connector_state(struct drm_device *dev)
3031 serge 9369
{
9370
	struct intel_connector *connector;
9371
 
9372
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9373
			    base.head) {
9374
		/* This also checks the encoder/connector hw state with the
9375
		 * ->get_hw_state callbacks. */
9376
		intel_connector_check_state(connector);
9377
 
9378
		WARN(&connector->new_encoder->base != connector->base.encoder,
9379
		     "connector's staged encoder doesn't match current encoder\n");
9380
	}
4104 Serge 9381
}
3031 serge 9382
 
4104 Serge 9383
static void
9384
check_encoder_state(struct drm_device *dev)
9385
{
9386
	struct intel_encoder *encoder;
9387
	struct intel_connector *connector;
9388
 
3031 serge 9389
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9390
			    base.head) {
9391
		bool enabled = false;
9392
		bool active = false;
9393
		enum pipe pipe, tracked_pipe;
9394
 
9395
		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
9396
			      encoder->base.base.id,
9397
			      drm_get_encoder_name(&encoder->base));
9398
 
9399
		WARN(&encoder->new_crtc->base != encoder->base.crtc,
9400
		     "encoder's stage crtc doesn't match current crtc\n");
9401
		WARN(encoder->connectors_active && !encoder->base.crtc,
9402
		     "encoder's active_connectors set, but no crtc\n");
9403
 
9404
		list_for_each_entry(connector, &dev->mode_config.connector_list,
9405
				    base.head) {
9406
			if (connector->base.encoder != &encoder->base)
9407
				continue;
9408
			enabled = true;
9409
			if (connector->base.dpms != DRM_MODE_DPMS_OFF)
9410
				active = true;
9411
		}
9412
		WARN(!!encoder->base.crtc != enabled,
9413
		     "encoder's enabled state mismatch "
9414
		     "(expected %i, found %i)\n",
9415
		     !!encoder->base.crtc, enabled);
9416
		WARN(active && !encoder->base.crtc,
9417
		     "active encoder with no crtc\n");
9418
 
9419
		WARN(encoder->connectors_active != active,
9420
		     "encoder's computed active state doesn't match tracked active state "
9421
		     "(expected %i, found %i)\n", active, encoder->connectors_active);
9422
 
9423
		active = encoder->get_hw_state(encoder, &pipe);
9424
		WARN(active != encoder->connectors_active,
9425
		     "encoder's hw state doesn't match sw tracking "
9426
		     "(expected %i, found %i)\n",
9427
		     encoder->connectors_active, active);
9428
 
9429
		if (!encoder->base.crtc)
9430
			continue;
9431
 
9432
		tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
9433
		WARN(active && pipe != tracked_pipe,
9434
		     "active encoder's pipe doesn't match"
9435
		     "(expected %i, found %i)\n",
9436
		     tracked_pipe, pipe);
9437
 
9438
	}
4104 Serge 9439
}
3031 serge 9440
 
4104 Serge 9441
static void
9442
check_crtc_state(struct drm_device *dev)
9443
{
9444
	drm_i915_private_t *dev_priv = dev->dev_private;
9445
	struct intel_crtc *crtc;
9446
	struct intel_encoder *encoder;
9447
	struct intel_crtc_config pipe_config;
9448
 
3031 serge 9449
	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
9450
			    base.head) {
9451
		bool enabled = false;
9452
		bool active = false;
9453
 
4104 Serge 9454
		memset(&pipe_config, 0, sizeof(pipe_config));
9455
 
3031 serge 9456
		DRM_DEBUG_KMS("[CRTC:%d]\n",
9457
			      crtc->base.base.id);
9458
 
9459
		WARN(crtc->active && !crtc->base.enabled,
9460
		     "active crtc, but not enabled in sw tracking\n");
9461
 
9462
		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9463
				    base.head) {
9464
			if (encoder->base.crtc != &crtc->base)
9465
				continue;
9466
			enabled = true;
9467
			if (encoder->connectors_active)
9468
				active = true;
9469
		}
4104 Serge 9470
 
3031 serge 9471
		WARN(active != crtc->active,
9472
		     "crtc's computed active state doesn't match tracked active state "
9473
		     "(expected %i, found %i)\n", active, crtc->active);
9474
		WARN(enabled != crtc->base.enabled,
9475
		     "crtc's computed enabled state doesn't match tracked enabled state "
9476
		     "(expected %i, found %i)\n", enabled, crtc->base.enabled);
9477
 
3746 Serge 9478
		active = dev_priv->display.get_pipe_config(crtc,
9479
							   &pipe_config);
9480
 
9481
		/* hw state is inconsistent with the pipe A quirk */
9482
		if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
9483
			active = crtc->active;
9484
 
4104 Serge 9485
		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9486
				    base.head) {
9487
			enum pipe pipe;
9488
			if (encoder->base.crtc != &crtc->base)
9489
				continue;
4560 Serge 9490
			if (encoder->get_hw_state(encoder, &pipe))
4104 Serge 9491
				encoder->get_config(encoder, &pipe_config);
9492
		}
9493
 
3746 Serge 9494
		WARN(crtc->active != active,
9495
		     "crtc active state doesn't match with hw state "
9496
		     "(expected %i, found %i)\n", crtc->active, active);
9497
 
4104 Serge 9498
		if (active &&
9499
		    !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) {
9500
			WARN(1, "pipe state doesn't match!\n");
9501
			intel_dump_pipe_config(crtc, &pipe_config,
9502
					       "[hw state]");
9503
			intel_dump_pipe_config(crtc, &crtc->config,
9504
					       "[sw state]");
9505
		}
3031 serge 9506
	}
9507
}
9508
 
4104 Serge 9509
static void
9510
check_shared_dpll_state(struct drm_device *dev)
9511
{
9512
	drm_i915_private_t *dev_priv = dev->dev_private;
9513
	struct intel_crtc *crtc;
9514
	struct intel_dpll_hw_state dpll_hw_state;
9515
	int i;
9516
 
9517
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
9518
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
9519
		int enabled_crtcs = 0, active_crtcs = 0;
9520
		bool active;
9521
 
9522
		memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
9523
 
9524
		DRM_DEBUG_KMS("%s\n", pll->name);
9525
 
9526
		active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
9527
 
9528
		WARN(pll->active > pll->refcount,
9529
		     "more active pll users than references: %i vs %i\n",
9530
		     pll->active, pll->refcount);
9531
		WARN(pll->active && !pll->on,
9532
		     "pll in active use but not on in sw tracking\n");
9533
		WARN(pll->on && !pll->active,
9534
		     "pll in on but not on in use in sw tracking\n");
9535
		WARN(pll->on != active,
9536
		     "pll on state mismatch (expected %i, found %i)\n",
9537
		     pll->on, active);
9538
 
9539
		list_for_each_entry(crtc, &dev->mode_config.crtc_list,
9540
				    base.head) {
9541
			if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
9542
				enabled_crtcs++;
9543
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
9544
				active_crtcs++;
9545
		}
9546
		WARN(pll->active != active_crtcs,
9547
		     "pll active crtcs mismatch (expected %i, found %i)\n",
9548
		     pll->active, active_crtcs);
9549
		WARN(pll->refcount != enabled_crtcs,
9550
		     "pll enabled crtcs mismatch (expected %i, found %i)\n",
9551
		     pll->refcount, enabled_crtcs);
9552
 
9553
		WARN(pll->on && memcmp(&pll->hw_state, &dpll_hw_state,
9554
				       sizeof(dpll_hw_state)),
9555
		     "pll hw state mismatch\n");
9556
	}
9557
}
9558
 
9559
void
9560
intel_modeset_check_state(struct drm_device *dev)
9561
{
9562
	check_connector_state(dev);
9563
	check_encoder_state(dev);
9564
	check_crtc_state(dev);
9565
	check_shared_dpll_state(dev);
9566
}
9567
 
4560 Serge 9568
void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
9569
				     int dotclock)
9570
{
9571
	/*
9572
	 * FDI already provided one idea for the dotclock.
9573
	 * Yell if the encoder disagrees.
9574
	 */
9575
	WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock),
9576
	     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
9577
	     pipe_config->adjusted_mode.crtc_clock, dotclock);
9578
}
9579
 
3746 Serge 9580
static int __intel_set_mode(struct drm_crtc *crtc,
3031 serge 9581
		    struct drm_display_mode *mode,
9582
		    int x, int y, struct drm_framebuffer *fb)
9583
{
9584
	struct drm_device *dev = crtc->dev;
9585
	drm_i915_private_t *dev_priv = dev->dev_private;
4560 Serge 9586
	struct drm_display_mode *saved_mode;
3746 Serge 9587
	struct intel_crtc_config *pipe_config = NULL;
3031 serge 9588
	struct intel_crtc *intel_crtc;
9589
	unsigned disable_pipes, prepare_pipes, modeset_pipes;
3480 Serge 9590
	int ret = 0;
3031 serge 9591
 
4560 Serge 9592
	saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL);
3480 Serge 9593
	if (!saved_mode)
9594
		return -ENOMEM;
9595
 
3031 serge 9596
	intel_modeset_affected_pipes(crtc, &modeset_pipes,
9597
				     &prepare_pipes, &disable_pipes);
9598
 
3480 Serge 9599
	*saved_mode = crtc->mode;
3031 serge 9600
 
9601
	/* Hack: Because we don't (yet) support global modeset on multiple
9602
	 * crtcs, we don't keep track of the new mode for more than one crtc.
9603
	 * Hence simply check whether any bit is set in modeset_pipes in all the
9604
	 * pieces of code that are not yet converted to deal with mutliple crtcs
9605
	 * changing their mode at the same time. */
9606
	if (modeset_pipes) {
3746 Serge 9607
		pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
9608
		if (IS_ERR(pipe_config)) {
9609
			ret = PTR_ERR(pipe_config);
9610
			pipe_config = NULL;
9611
 
3480 Serge 9612
			goto out;
3031 serge 9613
		}
4104 Serge 9614
		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
9615
				       "[modeset]");
3031 serge 9616
	}
9617
 
4560 Serge 9618
	/*
9619
	 * See if the config requires any additional preparation, e.g.
9620
	 * to adjust global state with pipes off.  We need to do this
9621
	 * here so we can get the modeset_pipe updated config for the new
9622
	 * mode set on this crtc.  For other crtcs we need to use the
9623
	 * adjusted_mode bits in the crtc directly.
9624
	 */
9625
	if (IS_VALLEYVIEW(dev)) {
9626
		valleyview_modeset_global_pipes(dev, &prepare_pipes,
9627
						modeset_pipes, pipe_config);
9628
 
9629
		/* may have added more to prepare_pipes than we should */
9630
		prepare_pipes &= ~disable_pipes;
9631
	}
9632
 
3746 Serge 9633
	for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
9634
		intel_crtc_disable(&intel_crtc->base);
9635
 
3031 serge 9636
	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
9637
		if (intel_crtc->base.enabled)
9638
			dev_priv->display.crtc_disable(&intel_crtc->base);
9639
	}
9640
 
9641
	/* crtc->mode is already used by the ->mode_set callbacks, hence we need
9642
	 * to set it here already despite that we pass it down the callchain.
2330 Serge 9643
	 */
3746 Serge 9644
	if (modeset_pipes) {
3031 serge 9645
		crtc->mode = *mode;
3746 Serge 9646
		/* mode_set/enable/disable functions rely on a correct pipe
9647
		 * config. */
9648
		to_intel_crtc(crtc)->config = *pipe_config;
4560 Serge 9649
 
9650
		/*
9651
		 * Calculate and store various constants which
9652
		 * are later needed by vblank and swap-completion
9653
		 * timestamping. They are derived from true hwmode.
9654
		 */
9655
		drm_calc_timestamping_constants(crtc,
9656
						&pipe_config->adjusted_mode);
3746 Serge 9657
	}
2327 Serge 9658
 
3031 serge 9659
	/* Only after disabling all output pipelines that will be changed can we
9660
	 * update the the output configuration. */
9661
	intel_modeset_update_state(dev, prepare_pipes);
9662
 
3243 Serge 9663
	if (dev_priv->display.modeset_global_resources)
9664
		dev_priv->display.modeset_global_resources(dev);
9665
 
3031 serge 9666
	/* Set up the DPLL and any encoders state that needs to adjust or depend
9667
	 * on the DPLL.
2330 Serge 9668
	 */
3031 serge 9669
	for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
3480 Serge 9670
		ret = intel_crtc_mode_set(&intel_crtc->base,
3031 serge 9671
					   x, y, fb);
3480 Serge 9672
		if (ret)
3031 serge 9673
		    goto done;
9674
	}
9675
 
9676
	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
9677
	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
9678
		dev_priv->display.crtc_enable(&intel_crtc->base);
9679
 
9680
	/* FIXME: add subpixel order */
9681
done:
4560 Serge 9682
	if (ret && crtc->enabled)
3480 Serge 9683
		crtc->mode = *saved_mode;
3031 serge 9684
 
3480 Serge 9685
out:
3746 Serge 9686
	kfree(pipe_config);
3480 Serge 9687
	kfree(saved_mode);
3031 serge 9688
	return ret;
2330 Serge 9689
}
2327 Serge 9690
 
4104 Serge 9691
static int intel_set_mode(struct drm_crtc *crtc,
3746 Serge 9692
		     struct drm_display_mode *mode,
9693
		     int x, int y, struct drm_framebuffer *fb)
9694
{
9695
	int ret;
9696
 
9697
	ret = __intel_set_mode(crtc, mode, x, y, fb);
9698
 
9699
	if (ret == 0)
9700
		intel_modeset_check_state(crtc->dev);
9701
 
9702
	return ret;
9703
}
9704
 
3480 Serge 9705
void intel_crtc_restore_mode(struct drm_crtc *crtc)
9706
{
9707
	intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb);
9708
}
9709
 
3031 serge 9710
#undef for_each_intel_crtc_masked
2327 Serge 9711
 
3031 serge 9712
static void intel_set_config_free(struct intel_set_config *config)
9713
{
9714
	if (!config)
9715
		return;
9716
 
9717
	kfree(config->save_connector_encoders);
9718
	kfree(config->save_encoder_crtcs);
9719
	kfree(config);
9720
}
9721
 
9722
static int intel_set_config_save_state(struct drm_device *dev,
9723
				       struct intel_set_config *config)
9724
{
9725
	struct drm_encoder *encoder;
9726
	struct drm_connector *connector;
9727
	int count;
9728
 
9729
	config->save_encoder_crtcs =
9730
		kcalloc(dev->mode_config.num_encoder,
9731
			sizeof(struct drm_crtc *), GFP_KERNEL);
9732
	if (!config->save_encoder_crtcs)
9733
		return -ENOMEM;
9734
 
9735
	config->save_connector_encoders =
9736
		kcalloc(dev->mode_config.num_connector,
9737
			sizeof(struct drm_encoder *), GFP_KERNEL);
9738
	if (!config->save_connector_encoders)
9739
		return -ENOMEM;
9740
 
9741
	/* Copy data. Note that driver private data is not affected.
9742
	 * Should anything bad happen only the expected state is
9743
	 * restored, not the drivers personal bookkeeping.
9744
	 */
9745
	count = 0;
9746
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
9747
		config->save_encoder_crtcs[count++] = encoder->crtc;
9748
	}
9749
 
9750
	count = 0;
9751
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
9752
		config->save_connector_encoders[count++] = connector->encoder;
9753
	}
9754
 
9755
	return 0;
9756
}
9757
 
9758
static void intel_set_config_restore_state(struct drm_device *dev,
9759
					   struct intel_set_config *config)
9760
{
9761
	struct intel_encoder *encoder;
9762
	struct intel_connector *connector;
9763
	int count;
9764
 
9765
	count = 0;
9766
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
9767
		encoder->new_crtc =
9768
			to_intel_crtc(config->save_encoder_crtcs[count++]);
9769
	}
9770
 
9771
	count = 0;
9772
	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
9773
		connector->new_encoder =
9774
			to_intel_encoder(config->save_connector_encoders[count++]);
9775
	}
9776
}
9777
 
3746 Serge 9778
static bool
4104 Serge 9779
is_crtc_connector_off(struct drm_mode_set *set)
3746 Serge 9780
{
9781
	int i;
9782
 
4104 Serge 9783
	if (set->num_connectors == 0)
9784
		return false;
9785
 
9786
	if (WARN_ON(set->connectors == NULL))
9787
		return false;
9788
 
9789
	for (i = 0; i < set->num_connectors; i++)
9790
		if (set->connectors[i]->encoder &&
9791
		    set->connectors[i]->encoder->crtc == set->crtc &&
9792
		    set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
3746 Serge 9793
			return true;
9794
 
9795
	return false;
9796
}
9797
 
3031 serge 9798
static void
9799
intel_set_config_compute_mode_changes(struct drm_mode_set *set,
9800
				      struct intel_set_config *config)
9801
{
9802
 
9803
	/* We should be able to check here if the fb has the same properties
9804
	 * and then just flip_or_move it */
4104 Serge 9805
	if (is_crtc_connector_off(set)) {
3746 Serge 9806
			config->mode_changed = true;
9807
	} else if (set->crtc->fb != set->fb) {
3031 serge 9808
		/* If we have no fb then treat it as a full mode set */
9809
		if (set->crtc->fb == NULL) {
4104 Serge 9810
			struct intel_crtc *intel_crtc =
9811
				to_intel_crtc(set->crtc);
9812
 
9813
			if (intel_crtc->active && i915_fastboot) {
9814
				DRM_DEBUG_KMS("crtc has no fb, will flip\n");
9815
				config->fb_changed = true;
9816
			} else {
9817
				DRM_DEBUG_KMS("inactive crtc, full mode set\n");
3031 serge 9818
			config->mode_changed = true;
4104 Serge 9819
			}
3031 serge 9820
		} else if (set->fb == NULL) {
9821
			config->mode_changed = true;
3746 Serge 9822
		} else if (set->fb->pixel_format !=
9823
			   set->crtc->fb->pixel_format) {
3031 serge 9824
			config->mode_changed = true;
3746 Serge 9825
		} else {
3031 serge 9826
			config->fb_changed = true;
9827
	}
3746 Serge 9828
	}
3031 serge 9829
 
9830
	if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
9831
		config->fb_changed = true;
9832
 
9833
	if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
9834
		DRM_DEBUG_KMS("modes are different, full mode set\n");
9835
		drm_mode_debug_printmodeline(&set->crtc->mode);
9836
		drm_mode_debug_printmodeline(set->mode);
9837
		config->mode_changed = true;
9838
	}
4104 Serge 9839
 
9840
	DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
9841
			set->crtc->base.id, config->mode_changed, config->fb_changed);
3031 serge 9842
}
9843
 
9844
static int
9845
intel_modeset_stage_output_state(struct drm_device *dev,
9846
				 struct drm_mode_set *set,
9847
				 struct intel_set_config *config)
9848
{
9849
	struct drm_crtc *new_crtc;
9850
	struct intel_connector *connector;
9851
	struct intel_encoder *encoder;
4104 Serge 9852
	int ro;
3031 serge 9853
 
3480 Serge 9854
	/* The upper layers ensure that we either disable a crtc or have a list
3031 serge 9855
	 * of connectors. For paranoia, double-check this. */
9856
	WARN_ON(!set->fb && (set->num_connectors != 0));
9857
	WARN_ON(set->fb && (set->num_connectors == 0));
9858
 
9859
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9860
			    base.head) {
9861
		/* Otherwise traverse passed in connector list and get encoders
9862
		 * for them. */
9863
		for (ro = 0; ro < set->num_connectors; ro++) {
9864
			if (set->connectors[ro] == &connector->base) {
9865
				connector->new_encoder = connector->encoder;
9866
				break;
9867
			}
9868
		}
9869
 
9870
		/* If we disable the crtc, disable all its connectors. Also, if
9871
		 * the connector is on the changing crtc but not on the new
9872
		 * connector list, disable it. */
9873
		if ((!set->fb || ro == set->num_connectors) &&
9874
		    connector->base.encoder &&
9875
		    connector->base.encoder->crtc == set->crtc) {
9876
			connector->new_encoder = NULL;
9877
 
9878
			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
9879
				connector->base.base.id,
9880
				drm_get_connector_name(&connector->base));
9881
		}
9882
 
9883
 
9884
		if (&connector->new_encoder->base != connector->base.encoder) {
9885
			DRM_DEBUG_KMS("encoder changed, full mode switch\n");
9886
			config->mode_changed = true;
9887
		}
9888
	}
9889
	/* connector->new_encoder is now updated for all connectors. */
9890
 
9891
	/* Update crtc of enabled connectors. */
9892
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9893
			    base.head) {
9894
		if (!connector->new_encoder)
9895
			continue;
9896
 
9897
		new_crtc = connector->new_encoder->base.crtc;
9898
 
9899
		for (ro = 0; ro < set->num_connectors; ro++) {
9900
			if (set->connectors[ro] == &connector->base)
9901
				new_crtc = set->crtc;
9902
		}
9903
 
9904
		/* Make sure the new CRTC will work with the encoder */
4560 Serge 9905
		if (!drm_encoder_crtc_ok(&connector->new_encoder->base,
3031 serge 9906
					   new_crtc)) {
9907
			return -EINVAL;
9908
		}
9909
		connector->encoder->new_crtc = to_intel_crtc(new_crtc);
9910
 
9911
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
9912
			connector->base.base.id,
9913
			drm_get_connector_name(&connector->base),
9914
			new_crtc->base.id);
9915
	}
9916
 
9917
	/* Check for any encoders that needs to be disabled. */
9918
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9919
			    base.head) {
4560 Serge 9920
		int num_connectors = 0;
3031 serge 9921
		list_for_each_entry(connector,
9922
				    &dev->mode_config.connector_list,
9923
				    base.head) {
9924
			if (connector->new_encoder == encoder) {
9925
				WARN_ON(!connector->new_encoder->new_crtc);
4560 Serge 9926
				num_connectors++;
3031 serge 9927
			}
9928
		}
4560 Serge 9929
 
9930
		if (num_connectors == 0)
3031 serge 9931
		encoder->new_crtc = NULL;
4560 Serge 9932
		else if (num_connectors > 1)
9933
			return -EINVAL;
9934
 
3031 serge 9935
		/* Only now check for crtc changes so we don't miss encoders
9936
		 * that will be disabled. */
9937
		if (&encoder->new_crtc->base != encoder->base.crtc) {
9938
			DRM_DEBUG_KMS("crtc changed, full mode switch\n");
9939
			config->mode_changed = true;
9940
		}
9941
	}
9942
	/* Now we've also updated encoder->new_crtc for all encoders. */
9943
 
9944
	return 0;
9945
}
9946
 
9947
static int intel_crtc_set_config(struct drm_mode_set *set)
9948
{
9949
	struct drm_device *dev;
9950
	struct drm_mode_set save_set;
9951
	struct intel_set_config *config;
9952
	int ret;
9953
 
9954
	BUG_ON(!set);
9955
	BUG_ON(!set->crtc);
9956
	BUG_ON(!set->crtc->helper_private);
9957
 
3480 Serge 9958
	/* Enforce sane interface api - has been abused by the fb helper. */
9959
	BUG_ON(!set->mode && set->fb);
9960
	BUG_ON(set->fb && set->num_connectors == 0);
3031 serge 9961
 
9962
	if (set->fb) {
9963
		DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
9964
				set->crtc->base.id, set->fb->base.id,
9965
				(int)set->num_connectors, set->x, set->y);
9966
	} else {
9967
		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
9968
	}
9969
 
9970
	dev = set->crtc->dev;
9971
 
9972
	ret = -ENOMEM;
9973
	config = kzalloc(sizeof(*config), GFP_KERNEL);
9974
	if (!config)
9975
		goto out_config;
9976
 
9977
	ret = intel_set_config_save_state(dev, config);
9978
	if (ret)
9979
		goto out_config;
9980
 
9981
	save_set.crtc = set->crtc;
9982
	save_set.mode = &set->crtc->mode;
9983
	save_set.x = set->crtc->x;
9984
	save_set.y = set->crtc->y;
9985
	save_set.fb = set->crtc->fb;
9986
 
9987
	/* Compute whether we need a full modeset, only an fb base update or no
9988
	 * change at all. In the future we might also check whether only the
9989
	 * mode changed, e.g. for LVDS where we only change the panel fitter in
9990
	 * such cases. */
9991
	intel_set_config_compute_mode_changes(set, config);
9992
 
9993
	ret = intel_modeset_stage_output_state(dev, set, config);
9994
	if (ret)
9995
		goto fail;
9996
 
9997
	if (config->mode_changed) {
3480 Serge 9998
		ret = intel_set_mode(set->crtc, set->mode,
9999
				     set->x, set->y, set->fb);
3031 serge 10000
	} else if (config->fb_changed) {
3746 Serge 10001
//       intel_crtc_wait_for_pending_flips(set->crtc);
10002
 
3031 serge 10003
		ret = intel_pipe_set_base(set->crtc,
10004
					  set->x, set->y, set->fb);
4560 Serge 10005
		/*
10006
		 * In the fastboot case this may be our only check of the
10007
		 * state after boot.  It would be better to only do it on
10008
		 * the first update, but we don't have a nice way of doing that
10009
		 * (and really, set_config isn't used much for high freq page
10010
		 * flipping, so increasing its cost here shouldn't be a big
10011
		 * deal).
10012
		 */
10013
		if (i915_fastboot && ret == 0)
10014
			intel_modeset_check_state(set->crtc->dev);
3031 serge 10015
	}
10016
 
3746 Serge 10017
	if (ret) {
4104 Serge 10018
		DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
3746 Serge 10019
			  set->crtc->base.id, ret);
3031 serge 10020
fail:
10021
	intel_set_config_restore_state(dev, config);
10022
 
10023
	/* Try to restore the config */
10024
	if (config->mode_changed &&
3480 Serge 10025
	    intel_set_mode(save_set.crtc, save_set.mode,
3031 serge 10026
			    save_set.x, save_set.y, save_set.fb))
10027
		DRM_ERROR("failed to restore config after modeset failure\n");
3746 Serge 10028
	}
3031 serge 10029
 
10030
out_config:
10031
	intel_set_config_free(config);
10032
	return ret;
10033
}
10034
 
2330 Serge 10035
static const struct drm_crtc_funcs intel_crtc_funcs = {
10036
//	.cursor_set = intel_crtc_cursor_set,
4557 Serge 10037
	.cursor_move = intel_crtc_cursor_move,
2330 Serge 10038
	.gamma_set = intel_crtc_gamma_set,
3031 serge 10039
	.set_config = intel_crtc_set_config,
2330 Serge 10040
	.destroy = intel_crtc_destroy,
10041
//	.page_flip = intel_crtc_page_flip,
10042
};
2327 Serge 10043
 
3243 Serge 10044
/* DDI platforms manage their CPU-side PLLs through the DDI code. */
static void intel_cpu_pll_init(struct drm_device *dev)
{
	if (HAS_DDI(dev))
		intel_ddi_pll_init(dev);
}
10049
 
4104 Serge 10050
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
10051
				      struct intel_shared_dpll *pll,
10052
				      struct intel_dpll_hw_state *hw_state)
3031 serge 10053
{
4104 Serge 10054
	uint32_t val;
3031 serge 10055
 
4104 Serge 10056
	val = I915_READ(PCH_DPLL(pll->id));
10057
	hw_state->dpll = val;
10058
	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
10059
	hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
10060
 
10061
	return val & DPLL_VCO_ENABLE;
10062
}
10063
 
10064
static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
10065
				  struct intel_shared_dpll *pll)
10066
{
10067
	I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0);
10068
	I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1);
10069
}
10070
 
10071
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
10072
				struct intel_shared_dpll *pll)
10073
{
10074
	/* PCH refclock must be enabled first */
4560 Serge 10075
	ibx_assert_pch_refclk_enabled(dev_priv);
4104 Serge 10076
 
10077
	I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
10078
 
10079
	/* Wait for the clocks to stabilize. */
10080
	POSTING_READ(PCH_DPLL(pll->id));
10081
	udelay(150);
10082
 
10083
	/* The pixel multiplier can only be updated once the
10084
	 * DPLL is enabled and the clocks are stable.
10085
	 *
10086
	 * So write it again.
10087
	 */
10088
	I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
10089
	POSTING_READ(PCH_DPLL(pll->id));
10090
	udelay(200);
10091
}
10092
 
10093
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
10094
				 struct intel_shared_dpll *pll)
10095
{
10096
	struct drm_device *dev = dev_priv->dev;
10097
	struct intel_crtc *crtc;
10098
 
10099
	/* Make sure no transcoder isn't still depending on us. */
10100
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
10101
		if (intel_crtc_to_shared_dpll(crtc) == pll)
10102
			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
3031 serge 10103
	}
10104
 
4104 Serge 10105
	I915_WRITE(PCH_DPLL(pll->id), 0);
10106
	POSTING_READ(PCH_DPLL(pll->id));
10107
	udelay(200);
10108
}
10109
 
10110
/* Human-readable names for the two IBX/CPT shared DPLLs, indexed by id. */
static char *ibx_pch_dpll_names[] = {
	"PCH DPLL A",
	"PCH DPLL B",
};
10114
 
10115
static void ibx_pch_dpll_init(struct drm_device *dev)
10116
{
10117
	struct drm_i915_private *dev_priv = dev->dev_private;
10118
	int i;
10119
 
10120
	dev_priv->num_shared_dpll = 2;
10121
 
10122
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
10123
		dev_priv->shared_dplls[i].id = i;
10124
		dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
10125
		dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
10126
		dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
10127
		dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
10128
		dev_priv->shared_dplls[i].get_hw_state =
10129
			ibx_pch_dpll_get_hw_state;
3031 serge 10130
	}
10131
}
10132
 
4104 Serge 10133
static void intel_shared_dpll_init(struct drm_device *dev)
10134
{
10135
	struct drm_i915_private *dev_priv = dev->dev_private;
10136
 
10137
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
10138
		ibx_pch_dpll_init(dev);
10139
	else
10140
		dev_priv->num_shared_dpll = 0;
10141
 
10142
	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
10143
}
10144
 
2330 Serge 10145
static void intel_crtc_init(struct drm_device *dev, int pipe)
10146
{
10147
	drm_i915_private_t *dev_priv = dev->dev_private;
10148
	struct intel_crtc *intel_crtc;
10149
	int i;
2327 Serge 10150
 
4560 Serge 10151
	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
2330 Serge 10152
	if (intel_crtc == NULL)
10153
		return;
2327 Serge 10154
 
2330 Serge 10155
	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
2327 Serge 10156
 
2330 Serge 10157
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
10158
	for (i = 0; i < 256; i++) {
10159
		intel_crtc->lut_r[i] = i;
10160
		intel_crtc->lut_g[i] = i;
10161
		intel_crtc->lut_b[i] = i;
10162
	}
2327 Serge 10163
 
4560 Serge 10164
	/*
10165
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
10166
	 * is hooked to plane B. Hence we want plane A feeding pipe B.
10167
	 */
2330 Serge 10168
	intel_crtc->pipe = pipe;
10169
	intel_crtc->plane = pipe;
4560 Serge 10170
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
2330 Serge 10171
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
10172
		intel_crtc->plane = !pipe;
10173
	}
2327 Serge 10174
 
2330 Serge 10175
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
10176
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
10177
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
10178
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
2327 Serge 10179
 
2330 Serge 10180
	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
10181
}
2327 Serge 10182
 
4560 Serge 10183
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
10184
{
10185
	struct drm_encoder *encoder = connector->base.encoder;
10186
 
10187
	WARN_ON(!mutex_is_locked(&connector->base.dev->mode_config.mutex));
10188
 
10189
	if (!encoder)
10190
		return INVALID_PIPE;
10191
 
10192
	return to_intel_crtc(encoder->crtc)->pipe;
10193
}
10194
 
3031 serge 10195
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
10196
				struct drm_file *file)
10197
{
10198
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
10199
	struct drm_mode_object *drmmode_obj;
10200
	struct intel_crtc *crtc;
2327 Serge 10201
 
3482 Serge 10202
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
10203
		return -ENODEV;
10204
 
3031 serge 10205
	drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
10206
			DRM_MODE_OBJECT_CRTC);
2327 Serge 10207
 
3031 serge 10208
	if (!drmmode_obj) {
10209
		DRM_ERROR("no such CRTC id\n");
4560 Serge 10210
		return -ENOENT;
3031 serge 10211
	}
2327 Serge 10212
 
3031 serge 10213
	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
10214
	pipe_from_crtc_id->pipe = crtc->pipe;
2327 Serge 10215
 
3031 serge 10216
	return 0;
10217
}
2327 Serge 10218
 
3031 serge 10219
static int intel_encoder_clones(struct intel_encoder *encoder)
2330 Serge 10220
{
3031 serge 10221
	struct drm_device *dev = encoder->base.dev;
10222
	struct intel_encoder *source_encoder;
2330 Serge 10223
	int index_mask = 0;
10224
	int entry = 0;
2327 Serge 10225
 
3031 serge 10226
	list_for_each_entry(source_encoder,
10227
			    &dev->mode_config.encoder_list, base.head) {
10228
 
10229
		if (encoder == source_encoder)
2330 Serge 10230
			index_mask |= (1 << entry);
3031 serge 10231
 
10232
		/* Intel hw has only one MUX where enocoders could be cloned. */
10233
		if (encoder->cloneable && source_encoder->cloneable)
10234
			index_mask |= (1 << entry);
10235
 
2330 Serge 10236
		entry++;
10237
	}
2327 Serge 10238
 
2330 Serge 10239
	return index_mask;
10240
}
2327 Serge 10241
 
2330 Serge 10242
static bool has_edp_a(struct drm_device *dev)
10243
{
10244
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 10245
 
2330 Serge 10246
	if (!IS_MOBILE(dev))
10247
		return false;
2327 Serge 10248
 
2330 Serge 10249
	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
10250
		return false;
2327 Serge 10251
 
2330 Serge 10252
	if (IS_GEN5(dev) &&
10253
	    (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
10254
		return false;
2327 Serge 10255
 
2330 Serge 10256
	return true;
10257
}
2327 Serge 10258
 
4560 Serge 10259
const char *intel_output_name(int output)
10260
{
10261
	static const char *names[] = {
10262
		[INTEL_OUTPUT_UNUSED] = "Unused",
10263
		[INTEL_OUTPUT_ANALOG] = "Analog",
10264
		[INTEL_OUTPUT_DVO] = "DVO",
10265
		[INTEL_OUTPUT_SDVO] = "SDVO",
10266
		[INTEL_OUTPUT_LVDS] = "LVDS",
10267
		[INTEL_OUTPUT_TVOUT] = "TV",
10268
		[INTEL_OUTPUT_HDMI] = "HDMI",
10269
		[INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort",
10270
		[INTEL_OUTPUT_EDP] = "eDP",
10271
		[INTEL_OUTPUT_DSI] = "DSI",
10272
		[INTEL_OUTPUT_UNKNOWN] = "Unknown",
10273
	};
10274
 
10275
	if (output < 0 || output >= ARRAY_SIZE(names) || !names[output])
10276
		return "Invalid";
10277
 
10278
	return names[output];
10279
}
10280
 
2330 Serge 10281
static void intel_setup_outputs(struct drm_device *dev)
10282
{
10283
	struct drm_i915_private *dev_priv = dev->dev_private;
10284
	struct intel_encoder *encoder;
10285
	bool dpd_is_edp = false;
2327 Serge 10286
 
4104 Serge 10287
	intel_lvds_init(dev);
2327 Serge 10288
 
3746 Serge 10289
	if (!IS_ULT(dev))
2330 Serge 10290
	intel_crt_init(dev);
2327 Serge 10291
 
3480 Serge 10292
	if (HAS_DDI(dev)) {
2330 Serge 10293
		int found;
2327 Serge 10294
 
3031 serge 10295
		/* Haswell uses DDI functions to detect digital outputs */
10296
		found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
10297
		/* DDI A only supports eDP */
10298
		if (found)
10299
			intel_ddi_init(dev, PORT_A);
10300
 
10301
		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
10302
		 * register */
10303
		found = I915_READ(SFUSE_STRAP);
10304
 
10305
		if (found & SFUSE_STRAP_DDIB_DETECTED)
10306
			intel_ddi_init(dev, PORT_B);
10307
		if (found & SFUSE_STRAP_DDIC_DETECTED)
10308
			intel_ddi_init(dev, PORT_C);
10309
		if (found & SFUSE_STRAP_DDID_DETECTED)
10310
			intel_ddi_init(dev, PORT_D);
10311
	} else if (HAS_PCH_SPLIT(dev)) {
10312
		int found;
4560 Serge 10313
		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
3031 serge 10314
 
3243 Serge 10315
		if (has_edp_a(dev))
10316
			intel_dp_init(dev, DP_A, PORT_A);
10317
 
3746 Serge 10318
		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
2330 Serge 10319
			/* PCH SDVOB multiplex with HDMIB */
3031 serge 10320
			found = intel_sdvo_init(dev, PCH_SDVOB, true);
2330 Serge 10321
			if (!found)
3746 Serge 10322
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
2330 Serge 10323
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
3031 serge 10324
				intel_dp_init(dev, PCH_DP_B, PORT_B);
2330 Serge 10325
		}
2327 Serge 10326
 
3746 Serge 10327
		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
10328
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
2327 Serge 10329
 
3746 Serge 10330
		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
10331
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);
2327 Serge 10332
 
2330 Serge 10333
		if (I915_READ(PCH_DP_C) & DP_DETECTED)
3031 serge 10334
			intel_dp_init(dev, PCH_DP_C, PORT_C);
2327 Serge 10335
 
3243 Serge 10336
		if (I915_READ(PCH_DP_D) & DP_DETECTED)
3031 serge 10337
			intel_dp_init(dev, PCH_DP_D, PORT_D);
10338
	} else if (IS_VALLEYVIEW(dev)) {
4560 Serge 10339
		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
10340
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
10341
					PORT_B);
10342
			if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
10343
				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
10344
		}
10345
 
4104 Serge 10346
		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
10347
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
10348
					PORT_C);
3480 Serge 10349
		if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
4560 Serge 10350
				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
4104 Serge 10351
		}
3243 Serge 10352
 
4560 Serge 10353
		intel_dsi_init(dev);
2330 Serge 10354
	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
10355
		bool found = false;
2327 Serge 10356
 
3746 Serge 10357
		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
2330 Serge 10358
			DRM_DEBUG_KMS("probing SDVOB\n");
3746 Serge 10359
			found = intel_sdvo_init(dev, GEN3_SDVOB, true);
2330 Serge 10360
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
10361
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
3746 Serge 10362
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
2330 Serge 10363
			}
2327 Serge 10364
 
4104 Serge 10365
			if (!found && SUPPORTS_INTEGRATED_DP(dev))
3031 serge 10366
				intel_dp_init(dev, DP_B, PORT_B);
2330 Serge 10367
			}
2327 Serge 10368
 
2330 Serge 10369
		/* Before G4X SDVOC doesn't have its own detect register */
2327 Serge 10370
 
3746 Serge 10371
		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
2330 Serge 10372
			DRM_DEBUG_KMS("probing SDVOC\n");
3746 Serge 10373
			found = intel_sdvo_init(dev, GEN3_SDVOC, false);
2330 Serge 10374
		}
2327 Serge 10375
 
3746 Serge 10376
		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
2327 Serge 10377
 
2330 Serge 10378
			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
10379
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
3746 Serge 10380
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
2330 Serge 10381
			}
4104 Serge 10382
			if (SUPPORTS_INTEGRATED_DP(dev))
3031 serge 10383
				intel_dp_init(dev, DP_C, PORT_C);
2330 Serge 10384
			}
2327 Serge 10385
 
2330 Serge 10386
		if (SUPPORTS_INTEGRATED_DP(dev) &&
4104 Serge 10387
		    (I915_READ(DP_D) & DP_DETECTED))
3031 serge 10388
			intel_dp_init(dev, DP_D, PORT_D);
2330 Serge 10389
	} else if (IS_GEN2(dev))
10390
		intel_dvo_init(dev);
2327 Serge 10391
 
2330 Serge 10392
//   if (SUPPORTS_TV(dev))
10393
//       intel_tv_init(dev);
2327 Serge 10394
 
2330 Serge 10395
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
10396
		encoder->base.possible_crtcs = encoder->crtc_mask;
10397
		encoder->base.possible_clones =
3031 serge 10398
			intel_encoder_clones(encoder);
2330 Serge 10399
	}
2327 Serge 10400
 
3243 Serge 10401
	intel_init_pch_refclk(dev);
10402
 
10403
	drm_helper_move_panel_connectors_to_head(dev);
2330 Serge 10404
}
10405
 
10406
 
10407
 
2335 Serge 10408
/* Framebuffer hooks; destroy and create_handle are stubbed out in this
 * port. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
//	.destroy = intel_user_framebuffer_destroy,
//	.create_handle = intel_user_framebuffer_create_handle,
};
2327 Serge 10412
 
2335 Serge 10413
int intel_framebuffer_init(struct drm_device *dev,
10414
			   struct intel_framebuffer *intel_fb,
2342 Serge 10415
			   struct drm_mode_fb_cmd2 *mode_cmd,
2335 Serge 10416
			   struct drm_i915_gem_object *obj)
10417
{
4560 Serge 10418
	int aligned_height, tile_height;
4104 Serge 10419
	int pitch_limit;
2335 Serge 10420
	int ret;
2327 Serge 10421
 
4560 Serge 10422
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
10423
 
3243 Serge 10424
	if (obj->tiling_mode == I915_TILING_Y) {
10425
		DRM_DEBUG("hardware does not support tiling Y\n");
2335 Serge 10426
		return -EINVAL;
3243 Serge 10427
	}
2327 Serge 10428
 
3243 Serge 10429
	if (mode_cmd->pitches[0] & 63) {
10430
		DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
10431
			  mode_cmd->pitches[0]);
10432
		return -EINVAL;
10433
	}
10434
 
4104 Serge 10435
	if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) {
10436
		pitch_limit = 32*1024;
10437
	} else if (INTEL_INFO(dev)->gen >= 4) {
10438
		if (obj->tiling_mode)
10439
			pitch_limit = 16*1024;
10440
		else
10441
			pitch_limit = 32*1024;
10442
	} else if (INTEL_INFO(dev)->gen >= 3) {
10443
		if (obj->tiling_mode)
10444
			pitch_limit = 8*1024;
10445
		else
10446
			pitch_limit = 16*1024;
10447
	} else
10448
		/* XXX DSPC is limited to 4k tiled */
10449
		pitch_limit = 8*1024;
10450
 
10451
	if (mode_cmd->pitches[0] > pitch_limit) {
10452
		DRM_DEBUG("%s pitch (%d) must be at less than %d\n",
10453
			  obj->tiling_mode ? "tiled" : "linear",
10454
			  mode_cmd->pitches[0], pitch_limit);
3243 Serge 10455
		return -EINVAL;
10456
	}
10457
 
10458
	if (obj->tiling_mode != I915_TILING_NONE &&
10459
	    mode_cmd->pitches[0] != obj->stride) {
10460
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
10461
			  mode_cmd->pitches[0], obj->stride);
2335 Serge 10462
			return -EINVAL;
3243 Serge 10463
	}
2327 Serge 10464
 
3243 Serge 10465
	/* Reject formats not supported by any plane early. */
2342 Serge 10466
	switch (mode_cmd->pixel_format) {
3243 Serge 10467
	case DRM_FORMAT_C8:
2342 Serge 10468
	case DRM_FORMAT_RGB565:
10469
	case DRM_FORMAT_XRGB8888:
3243 Serge 10470
	case DRM_FORMAT_ARGB8888:
10471
		break;
10472
	case DRM_FORMAT_XRGB1555:
10473
	case DRM_FORMAT_ARGB1555:
10474
		if (INTEL_INFO(dev)->gen > 3) {
4104 Serge 10475
			DRM_DEBUG("unsupported pixel format: %s\n",
10476
				  drm_get_format_name(mode_cmd->pixel_format));
3243 Serge 10477
			return -EINVAL;
10478
		}
10479
		break;
3031 serge 10480
	case DRM_FORMAT_XBGR8888:
3243 Serge 10481
	case DRM_FORMAT_ABGR8888:
2342 Serge 10482
	case DRM_FORMAT_XRGB2101010:
10483
	case DRM_FORMAT_ARGB2101010:
3243 Serge 10484
	case DRM_FORMAT_XBGR2101010:
10485
	case DRM_FORMAT_ABGR2101010:
10486
		if (INTEL_INFO(dev)->gen < 4) {
4104 Serge 10487
			DRM_DEBUG("unsupported pixel format: %s\n",
10488
				  drm_get_format_name(mode_cmd->pixel_format));
3243 Serge 10489
			return -EINVAL;
10490
		}
2335 Serge 10491
		break;
2342 Serge 10492
	case DRM_FORMAT_YUYV:
10493
	case DRM_FORMAT_UYVY:
10494
	case DRM_FORMAT_YVYU:
10495
	case DRM_FORMAT_VYUY:
3243 Serge 10496
		if (INTEL_INFO(dev)->gen < 5) {
4104 Serge 10497
			DRM_DEBUG("unsupported pixel format: %s\n",
10498
				  drm_get_format_name(mode_cmd->pixel_format));
3243 Serge 10499
			return -EINVAL;
10500
		}
2342 Serge 10501
		break;
2335 Serge 10502
	default:
4104 Serge 10503
		DRM_DEBUG("unsupported pixel format: %s\n",
10504
			  drm_get_format_name(mode_cmd->pixel_format));
2335 Serge 10505
		return -EINVAL;
10506
	}
2327 Serge 10507
 
3243 Serge 10508
	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
10509
	if (mode_cmd->offsets[0] != 0)
10510
		return -EINVAL;
10511
 
4560 Serge 10512
	tile_height = IS_GEN2(dev) ? 16 : 8;
10513
	aligned_height = ALIGN(mode_cmd->height,
10514
			       obj->tiling_mode ? tile_height : 1);
10515
	/* FIXME drm helper for size checks (especially planar formats)? */
10516
	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
10517
		return -EINVAL;
10518
 
3480 Serge 10519
	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
10520
	intel_fb->obj = obj;
4560 Serge 10521
	intel_fb->obj->framebuffer_references++;
3480 Serge 10522
 
2335 Serge 10523
	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
10524
	if (ret) {
10525
		DRM_ERROR("framebuffer init failed %d\n", ret);
10526
		return ret;
10527
	}
2327 Serge 10528
 
2335 Serge 10529
	return 0;
10530
}
2327 Serge 10531
 
4560 Serge 10532
#ifndef CONFIG_DRM_I915_FBDEV
/* Without fbdev emulation, output poll notifications are a no-op. */
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
#endif
2327 Serge 10537
 
2360 Serge 10538
static const struct drm_mode_config_funcs intel_mode_funcs = {
4560 Serge 10539
	.fb_create = NULL,
10540
	.output_poll_changed = intel_fbdev_output_poll_changed,
2360 Serge 10541
};
2327 Serge 10542
 
3031 serge 10543
/* Set up chip specific display functions */
10544
static void intel_init_display(struct drm_device *dev)
10545
{
10546
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 10547
 
4104 Serge 10548
	if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
10549
		dev_priv->display.find_dpll = g4x_find_best_dpll;
10550
	else if (IS_VALLEYVIEW(dev))
10551
		dev_priv->display.find_dpll = vlv_find_best_dpll;
10552
	else if (IS_PINEVIEW(dev))
10553
		dev_priv->display.find_dpll = pnv_find_best_dpll;
10554
	else
10555
		dev_priv->display.find_dpll = i9xx_find_best_dpll;
10556
 
3480 Serge 10557
	if (HAS_DDI(dev)) {
3746 Serge 10558
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
3243 Serge 10559
		dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
10560
		dev_priv->display.crtc_enable = haswell_crtc_enable;
10561
		dev_priv->display.crtc_disable = haswell_crtc_disable;
10562
		dev_priv->display.off = haswell_crtc_off;
10563
		dev_priv->display.update_plane = ironlake_update_plane;
10564
	} else if (HAS_PCH_SPLIT(dev)) {
3746 Serge 10565
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
3031 serge 10566
		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
10567
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
10568
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
10569
		dev_priv->display.off = ironlake_crtc_off;
10570
		dev_priv->display.update_plane = ironlake_update_plane;
4104 Serge 10571
	} else if (IS_VALLEYVIEW(dev)) {
10572
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
10573
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
10574
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
10575
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
10576
		dev_priv->display.off = i9xx_crtc_off;
10577
		dev_priv->display.update_plane = i9xx_update_plane;
3031 serge 10578
	} else {
3746 Serge 10579
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
3031 serge 10580
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
10581
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
10582
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
10583
		dev_priv->display.off = i9xx_crtc_off;
10584
		dev_priv->display.update_plane = i9xx_update_plane;
10585
	}
2327 Serge 10586
 
3031 serge 10587
	/* Returns the core display clock speed */
10588
	if (IS_VALLEYVIEW(dev))
10589
		dev_priv->display.get_display_clock_speed =
10590
			valleyview_get_display_clock_speed;
10591
	else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
10592
		dev_priv->display.get_display_clock_speed =
10593
			i945_get_display_clock_speed;
10594
	else if (IS_I915G(dev))
10595
		dev_priv->display.get_display_clock_speed =
10596
			i915_get_display_clock_speed;
4104 Serge 10597
	else if (IS_I945GM(dev) || IS_845G(dev))
3031 serge 10598
		dev_priv->display.get_display_clock_speed =
10599
			i9xx_misc_get_display_clock_speed;
4104 Serge 10600
	else if (IS_PINEVIEW(dev))
10601
		dev_priv->display.get_display_clock_speed =
10602
			pnv_get_display_clock_speed;
3031 serge 10603
	else if (IS_I915GM(dev))
10604
		dev_priv->display.get_display_clock_speed =
10605
			i915gm_get_display_clock_speed;
10606
	else if (IS_I865G(dev))
10607
		dev_priv->display.get_display_clock_speed =
10608
			i865_get_display_clock_speed;
10609
	else if (IS_I85X(dev))
10610
		dev_priv->display.get_display_clock_speed =
10611
			i855_get_display_clock_speed;
10612
	else /* 852, 830 */
10613
		dev_priv->display.get_display_clock_speed =
10614
			i830_get_display_clock_speed;
2327 Serge 10615
 
3031 serge 10616
	if (HAS_PCH_SPLIT(dev)) {
10617
		if (IS_GEN5(dev)) {
10618
			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
10619
			dev_priv->display.write_eld = ironlake_write_eld;
10620
		} else if (IS_GEN6(dev)) {
10621
			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
10622
			dev_priv->display.write_eld = ironlake_write_eld;
10623
		} else if (IS_IVYBRIDGE(dev)) {
10624
			/* FIXME: detect B0+ stepping and use auto training */
10625
			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
10626
			dev_priv->display.write_eld = ironlake_write_eld;
3243 Serge 10627
			dev_priv->display.modeset_global_resources =
10628
				ivb_modeset_global_resources;
4560 Serge 10629
		} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
3031 serge 10630
			dev_priv->display.fdi_link_train = hsw_fdi_link_train;
10631
			dev_priv->display.write_eld = haswell_write_eld;
3480 Serge 10632
			dev_priv->display.modeset_global_resources =
10633
				haswell_modeset_global_resources;
10634
		}
3031 serge 10635
	} else if (IS_G4X(dev)) {
10636
		dev_priv->display.write_eld = g4x_write_eld;
4560 Serge 10637
	} else if (IS_VALLEYVIEW(dev)) {
10638
		dev_priv->display.modeset_global_resources =
10639
			valleyview_modeset_global_resources;
10640
		dev_priv->display.write_eld = ironlake_write_eld;
3031 serge 10641
	}
2327 Serge 10642
 
3031 serge 10643
	/* Default just returns -ENODEV to indicate unsupported */
10644
//	dev_priv->display.queue_flip = intel_default_queue_flip;
2327 Serge 10645
 
10646
 
10647
 
10648
 
4560 Serge 10649
	intel_panel_init_backlight_funcs(dev);
3031 serge 10650
}
10651
 
10652
/*
10653
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
10654
 * resume, or other times.  This quirk makes sure that's the case for
10655
 * affected systems.
10656
 */
10657
static void quirk_pipea_force(struct drm_device *dev)
2330 Serge 10658
{
10659
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 10660
 
3031 serge 10661
	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
10662
	DRM_INFO("applying pipe a force quirk\n");
10663
}
2327 Serge 10664
 
3031 serge 10665
/*
10666
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
10667
 */
10668
static void quirk_ssc_force_disable(struct drm_device *dev)
10669
{
10670
	struct drm_i915_private *dev_priv = dev->dev_private;
10671
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
10672
	DRM_INFO("applying lvds SSC disable quirk\n");
2330 Serge 10673
}
2327 Serge 10674
 
3031 serge 10675
/*
10676
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
10677
 * brightness value
10678
 */
10679
static void quirk_invert_brightness(struct drm_device *dev)
2330 Serge 10680
{
10681
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 10682
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
10683
	DRM_INFO("applying inverted panel brightness quirk\n");
10684
}
2327 Serge 10685
 
3031 serge 10686
/* One PCI-ID-keyed quirk entry: when the running device matches the IDs,
 * hook() is invoked once at init time (see intel_init_quirks()). */
struct intel_quirk {
	int device;		/* PCI device ID to match */
	int subsystem_vendor;	/* PCI subsystem vendor ID, or PCI_ANY_ID */
	int subsystem_device;	/* PCI subsystem device ID, or PCI_ANY_ID */
	void (*hook)(struct drm_device *dev);	/* quirk to apply on match */
};
2327 Serge 10692
 
3031 serge 10693
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
10694
/* For systems that don't have a meaningful PCI subdevice/subvendor ID:
 * match on DMI (SMBIOS) strings instead. */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);		/* quirk to apply on DMI match */
	const struct dmi_system_id (*dmi_id_list)[];	/* NULL-terminated DMI match table */
};
2327 Serge 10698
 
3031 serge 10699
/* DMI callback: log which platform matched; returning 1 marks the entry
 * as matched so the associated hook gets applied. */
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}
2327 Serge 10704
 
3031 serge 10705
/* DMI-keyed quirk table, checked in addition to the PCI-ID table above. */
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				/* NCR ships several boxes with reversed backlight
				 * polarity; match on vendor only (empty product). */
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};
2327 Serge 10720
 
3031 serge 10721
/* PCI-ID-keyed quirk table; entries are { device, subsys_vendor,
 * subsys_device, hook } and PCI_ANY_ID wildcards a subsystem field. */
static struct intel_quirk intel_quirks[] = {
	/* HP Mini needs pipe A force quirk (LP: #322104) */
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },

	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 830 needs to leave pipe A & dpll A up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
};
2327 Serge 10755
 
3031 serge 10756
static void intel_init_quirks(struct drm_device *dev)
2330 Serge 10757
{
3031 serge 10758
	struct pci_dev *d = dev->pdev;
10759
	int i;
2327 Serge 10760
 
3031 serge 10761
	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
10762
		struct intel_quirk *q = &intel_quirks[i];
2327 Serge 10763
 
3031 serge 10764
		if (d->device == q->device &&
10765
		    (d->subsystem_vendor == q->subsystem_vendor ||
10766
		     q->subsystem_vendor == PCI_ANY_ID) &&
10767
		    (d->subsystem_device == q->subsystem_device ||
10768
		     q->subsystem_device == PCI_ANY_ID))
10769
			q->hook(dev);
10770
	}
2330 Serge 10771
}
2327 Serge 10772
 
3031 serge 10773
/* Disable the VGA plane that we never use */
10774
/* Disable the legacy VGA plane that we never use.
 *
 * Pokes the VGA sequencer through legacy I/O ports and then writes the
 * GPU's VGA control register. The vga_get/vga_put arbitration calls are
 * stubbed out in this port (no vgaarb on KolibriOS). */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	u32 vga_reg = i915_vgacntrl_reg(dev);	/* location differs per platform */

//	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	/* Set bit 5 of sequencer register SR01 (screen off) via the
	 * index/data port pair. */
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
//	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);	/* let the sequencer settle before touching VGACNTRL */

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);	/* flush the write */
}
10790
 
3031 serge 10791
void intel_modeset_init_hw(struct drm_device *dev)
2342 Serge 10792
{
3031 serge 10793
	intel_prepare_ddi(dev);
2342 Serge 10794
 
3031 serge 10795
	intel_init_clock_gating(dev);
10796
 
4560 Serge 10797
	intel_reset_dpio(dev);
4398 Serge 10798
 
3482 Serge 10799
    mutex_lock(&dev->struct_mutex);
10800
    intel_enable_gt_powersave(dev);
10801
    mutex_unlock(&dev->struct_mutex);
2342 Serge 10802
}
10803
 
4398 Serge 10804
/* Thin wrapper so callers outside the display code can suspend the
 * display hardware without knowing about intel_suspend_hw(). */
void intel_modeset_suspend_hw(struct drm_device *dev)
{
	intel_suspend_hw(dev);
}
10808
 
3031 serge 10809
/* Software-side modeset initialisation: set up the DRM mode_config,
 * apply quirks, create CRTCs/planes/PLLs and register all outputs. */
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i, j, ret;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.funcs = &intel_mode_funcs;

	intel_init_quirks(dev);

	intel_init_pm(dev);

	/* No display pipes at all (e.g. a render-only fused part):
	 * nothing further to set up. */
	if (INTEL_INFO(dev)->num_pipes == 0)
		return;

	intel_init_display(dev);

	/* Maximum framebuffer dimensions grow with the hardware generation. */
	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}
	dev->mode_config.fb_base = dev_priv->gtt.mappable_base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev)->num_pipes,
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");

	/* One CRTC per pipe, plus the sprite planes attached to each pipe.
	 * A failed sprite init is logged but not fatal. */
	for_each_pipe(i) {
		intel_crtc_init(dev, i);
		for (j = 0; j < dev_priv->num_plane; j++) {
			ret = intel_plane_init(dev, i, j);
			if (ret)
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
					      pipe_name(i), sprite_name(i, j), ret);
		}
	}

	intel_init_dpio(dev);
	intel_reset_dpio(dev);

	intel_cpu_pll_init(dev);
	intel_shared_dpll_init(dev);

	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	/* Just in case the BIOS is doing something questionable. */
	intel_disable_fbc(dev);
}
2330 Serge 10872
 
3031 serge 10873
static void
10874
intel_connector_break_all_links(struct intel_connector *connector)
10875
{
10876
	connector->base.dpms = DRM_MODE_DPMS_OFF;
10877
	connector->base.encoder = NULL;
10878
	connector->encoder->connectors_active = false;
10879
	connector->encoder->base.crtc = NULL;
2330 Serge 10880
}
10881
 
3031 serge 10882
static void intel_enable_pipe_a(struct drm_device *dev)
2330 Serge 10883
{
3031 serge 10884
	struct intel_connector *connector;
10885
	struct drm_connector *crt = NULL;
10886
	struct intel_load_detect_pipe load_detect_temp;
2330 Serge 10887
 
3031 serge 10888
	/* We can't just switch on the pipe A, we need to set things up with a
10889
	 * proper mode and output configuration. As a gross hack, enable pipe A
10890
	 * by enabling the load detect pipe once. */
10891
	list_for_each_entry(connector,
10892
			    &dev->mode_config.connector_list,
10893
			    base.head) {
10894
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
10895
			crt = &connector->base;
10896
			break;
2330 Serge 10897
		}
10898
	}
10899
 
3031 serge 10900
	if (!crt)
10901
		return;
2330 Serge 10902
 
3031 serge 10903
	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp))
10904
		intel_release_load_detect_pipe(crt, &load_detect_temp);
2327 Serge 10905
 
10906
 
10907
}
10908
 
3031 serge 10909
static bool
10910
intel_check_plane_mapping(struct intel_crtc *crtc)
2327 Serge 10911
{
3746 Serge 10912
	struct drm_device *dev = crtc->base.dev;
10913
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 10914
	u32 reg, val;
2327 Serge 10915
 
3746 Serge 10916
	if (INTEL_INFO(dev)->num_pipes == 1)
3031 serge 10917
		return true;
2327 Serge 10918
 
3031 serge 10919
	reg = DSPCNTR(!crtc->plane);
10920
	val = I915_READ(reg);
2327 Serge 10921
 
3031 serge 10922
	if ((val & DISPLAY_PLANE_ENABLE) &&
10923
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
10924
		return false;
2327 Serge 10925
 
3031 serge 10926
	return true;
2327 Serge 10927
}
10928
 
3031 serge 10929
/* Bring one CRTC's software state into agreement with the hardware,
 * fixing up whatever the BIOS (or a botched resume) left behind.
 * Order matters: plane mapping first, then pipe A quirk, then DPMS. */
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg;

	/* Clear any frame start delays used for debugging left by the BIOS */
	reg = PIPECONF(crtc->config.cpu_transcoder);
	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);

	/* We need to sanitize the plane -> pipe mapping first because this will
	 * disable the crtc (and hence change the state) if it is wrong. Note
	 * that gen4+ has a fixed plane -> pipe mapping.  */
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
		struct intel_connector *connector;
		bool plane;

		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
			      crtc->base.base.id);

		/* Pipe has the wrong plane attached and the plane is active.
		 * Temporarily change the plane mapping and disable everything
		 * ...  */
		plane = crtc->plane;
		crtc->plane = !plane;
		dev_priv->display.crtc_disable(&crtc->base);
		crtc->plane = plane;

		/* ... and break all links. */
		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    base.head) {
			if (connector->encoder->base.crtc != &crtc->base)
				continue;

			intel_connector_break_all_links(connector);
		}

		WARN_ON(crtc->active);
		crtc->base.enabled = false;
	}

	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
	    crtc->pipe == PIPE_A && !crtc->active) {
		/* BIOS forgot to enable pipe A, this mostly happens after
		 * resume. Force-enable the pipe to fix this, the update_dpms
		 * call below we restore the pipe to the right state, but leave
		 * the required bits on. */
		intel_enable_pipe_a(dev);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	intel_crtc_update_dpms(&crtc->base);

	if (crtc->active != crtc->base.enabled) {
		struct intel_encoder *encoder;

		/* This can happen either due to bugs in the get_hw_state
		 * functions or because the pipe is force-enabled due to the
		 * pipe A quirk. */
		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
			      crtc->base.base.id,
			      crtc->base.enabled ? "enabled" : "disabled",
			      crtc->active ? "enabled" : "disabled");

		crtc->base.enabled = crtc->active;

		/* Because we only establish the connector -> encoder ->
		 * crtc links if something is active, this means the
		 * crtc is now deactivated. Break the links. connector
		 * -> encoder links are only establish when things are
		 *  actually up, hence no need to break them. */
		WARN_ON(crtc->active);

		for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
			WARN_ON(encoder->connectors_active);
			encoder->base.crtc = NULL;
		}
	}
}
11009
 
3031 serge 11010
/* Fix up one encoder whose software state disagrees with the hardware:
 * an encoder claiming active connectors but with no active pipe gets
 * disabled and all its connector links broken. */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;
	struct drm_device *dev = encoder->base.dev;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	if (encoder->connectors_active && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      drm_get_encoder_name(&encoder->base));

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      drm_get_encoder_name(&encoder->base));
			encoder->disable(encoder);
		}

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */
		list_for_each_entry(connector,
				    &dev->mode_config.connector_list,
				    base.head) {
			if (connector->encoder != encoder)
				continue;

			intel_connector_break_all_links(connector);
		}
	}
	/* Enabled encoders without active connectors will be fixed in
	 * the crtc fixup. */
}
11052
 
3746 Serge 11053
/* Re-disable the VGA plane if something (BIOS, firmware) turned it back
 * on behind our back. Safe to call very early in resume. */
void i915_redisable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 vga_reg = i915_vgacntrl_reg(dev);

	/* This function can be called both from intel_modeset_setup_hw_state or
	 * at a very early point in our resume sequence, where the power well
	 * structures are not yet restored. Since this function is at a very
	 * paranoid "someone might have enabled VGA while we were not looking"
	 * level, just check if the power well is enabled instead of trying to
	 * follow the "don't touch the power well if we don't need it" policy
	 * the rest of the driver uses. */
	if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
	    (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
		return;

	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
		i915_disable_vga(dev);
	}
}
11074
 
4104 Serge 11075
/* Read the current hardware modeset state into the software tracking
 * structures: per-CRTC pipe config, shared DPLL usage, encoder->pipe
 * links and connector->encoder links. Pure readout — no sanitizing. */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	int i;

	/* Per-CRTC: ask the platform hook whether the pipe is up and
	 * fill in crtc->config from the registers. */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
			    base.head) {
		memset(&crtc->config, 0, sizeof(crtc->config));

		crtc->active = dev_priv->display.get_pipe_config(crtc,
								 &crtc->config);

		crtc->base.enabled = crtc->active;
		crtc->primary_enabled = crtc->active;

		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
			      crtc->base.base.id,
			      crtc->active ? "enabled" : "disabled");
	}

	/* FIXME: Smash this into the new shared dpll infrastructure. */
	if (HAS_DDI(dev))
		intel_ddi_setup_hw_pll_state(dev);

	/* Shared DPLLs: read on/off state and count active users. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state);
		pll->active = 0;
		list_for_each_entry(crtc, &dev->mode_config.crtc_list,
				    base.head) {
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
				pll->active++;
		}
		pll->refcount = pll->active;

		DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
			      pll->name, pll->refcount, pll->on);
	}

	/* Encoders: which pipe (if any) each one is driving. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, &crtc->config);
		} else {
			encoder->base.crtc = NULL;
		}

		encoder->connectors_active = false;
		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id,
			      drm_get_encoder_name(&encoder->base),
			      encoder->base.crtc ? "enabled" : "disabled",
			      pipe_name(pipe));
	}

	/* Connectors: link them back to their encoders when active. */
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;
			connector->encoder->connectors_active = true;
			connector->base.encoder = &connector->encoder->base;
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id,
			      drm_get_connector_name(&connector->base),
			      connector->base.encoder ? "enabled" : "disabled");
	}
}
2332 Serge 11155
 
4104 Serge 11156
/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
 * and i915 state tracking structures.
 *
 * With force_restore set the read-out state is also pushed back to the
 * hardware (used on resume); otherwise only the staged output state is
 * refreshed. */
void intel_modeset_setup_hw_state(struct drm_device *dev,
				  bool force_restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	int i;

	intel_modeset_readout_hw_state(dev);

	/*
	 * Now that we have the config, copy it to each CRTC struct
	 * Note that this could go away if we move to using crtc_config
	 * checking everywhere.
	 */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
			    base.head) {
		if (crtc->active && i915_fastboot) {
			intel_crtc_mode_from_pipe_config(crtc, &crtc->config);

			DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
				      crtc->base.base.id);
			drm_mode_debug_printmodeline(&crtc->base.mode);
		}
	}

	/* HW state is read out, now we need to sanitize this mess. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		intel_sanitize_encoder(encoder);
	}

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		intel_sanitize_crtc(crtc);
		intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
	}

	/* Turn off shared DPLLs that are running with no users. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);

		pll->disable(dev_priv, pll);
		pll->on = false;
	}

	if (HAS_PCH_SPLIT(dev))
		ilk_wm_get_hw_state(dev);

	if (force_restore) {
		i915_redisable_vga(dev);

		/*
		 * We need to use raw interfaces for restoring state to avoid
		 * checking (bogus) intermediate states.
		 */
		for_each_pipe(pipe) {
			struct drm_crtc *crtc =
				dev_priv->pipe_to_crtc_mapping[pipe];

			__intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
					 crtc->fb);
		}
	} else {
		intel_modeset_update_staged_output_state(dev);
	}

	intel_modeset_check_state(dev);
}
11232
 
3031 serge 11233
/* Modeset bring-up that must wait until GEM is ready: hardware init,
 * then reset the mode config and take over whatever state the firmware
 * left on. (Overlay setup is disabled in this port.) */
void intel_modeset_gem_init(struct drm_device *dev)
{
	intel_modeset_init_hw(dev);

//   intel_setup_overlay(dev);

	/* mode_config.mutex protects the mode-config reset and takeover. */
	mutex_lock(&dev->mode_config.mutex);
	drm_mode_config_reset(dev);
	intel_modeset_setup_hw_state(dev, false);
	mutex_unlock(&dev->mode_config.mutex);
}
11244
 
3031 serge 11245
/* Modeset teardown. NOTE: the whole body is compiled out (#if 0) in this
 * KolibriOS port — the driver never unloads — but the upstream teardown
 * sequence is kept for reference. */
void intel_modeset_cleanup(struct drm_device *dev)
{
#if 0
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	struct drm_connector *connector;

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning of rps, connectors, ...) would
	 * experience fancy races otherwise.
	 */
	drm_irq_uninstall(dev);
	cancel_work_sync(&dev_priv->hotplug_work);
	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	drm_kms_helper_poll_fini(dev);

	mutex_lock(&dev->struct_mutex);

	intel_unregister_dsm_handler();

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/* Skip inactive CRTCs */
		if (!crtc->fb)
			continue;

		intel_increase_pllclock(crtc);
	}

	intel_disable_fbc(dev);

	intel_disable_gt_powersave(dev);

	ironlake_teardown_rc6(dev);

	mutex_unlock(&dev->struct_mutex);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	/* destroy the backlight and sysfs files before encoders/connectors */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		intel_panel_destroy_backlight(connector);
		drm_sysfs_connector_remove(connector);
	}

	drm_mode_config_cleanup(dev);
#endif
}
11297
 
11298
/*
3031 serge 11299
 * Return which encoder is currently attached for connector.
2327 Serge 11300
 */
3031 serge 11301
/*
 * Return which encoder is currently attached for connector.
 */
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
{
	return &intel_attached_encoder(connector)->base;
}
2327 Serge 11305
 
3031 serge 11306
/* Record the connector -> encoder link on both the intel side and the
 * DRM core side (possible_encoders bookkeeping). */
void intel_connector_attach_encoder(struct intel_connector *connector,
				    struct intel_encoder *encoder)
{
	connector->encoder = encoder;
	drm_mode_connector_attach_encoder(&connector->base,
					  &encoder->base);
}
11313
 
11314
/*
3031 serge 11315
 * set vga decode state - true == enable VGA decode
2327 Serge 11316
 */
3031 serge 11317
/* Set VGA decode state on the bridge device - true == enable VGA decode.
 * Always returns 0. */
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned gmch_reg;
	u16 ctrl;

	/* Gen6+ moved the GMCH control word to a different config offset. */
	if (INTEL_INFO(dev)->gen >= 6)
		gmch_reg = SNB_GMCH_CTRL;
	else
		gmch_reg = INTEL_GMCH_CTRL;

	pci_read_config_word(dev_priv->bridge_dev, gmch_reg, &ctrl);
	if (state)
		ctrl &= ~INTEL_GMCH_VGA_DISABLE;
	else
		ctrl |= INTEL_GMCH_VGA_DISABLE;
	pci_write_config_word(dev_priv->bridge_dev, gmch_reg, ctrl);

	return 0;
}
11331
 
3031 serge 11332
#ifdef CONFIG_DEBUG_FS
2327 Serge 11333
 
3031 serge 11334
/* Snapshot of display-register state captured at GPU-hang time, printed
 * later by intel_display_print_error_state(). */
struct intel_display_error_state {

	u32 power_well_driver;	/* HSW_PWR_WELL_DRIVER (HSW/BDW only) */

	int num_transcoders;	/* valid entries in transcoder[] below */

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;	/* registers below only valid if true */
		u32 source;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;		/* gen <= 3 only */
		u32 pos;		/* gen <= 3 only */
		u32 addr;		/* not captured on gen8+/HSW */
		u32 surface;		/* gen >= 4 only */
		u32 tile_offset;	/* gen >= 4 only */
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool power_domain_on;	/* registers below only valid if true */
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];	/* up to 3 pipes + eDP transcoder */
};
2327 Serge 11376
 
3031 serge 11377
/* Capture the current display-register state for the error-state dump.
 * Called from hang-recovery context, hence GFP_ATOMIC. Returns NULL on
 * pipeless hardware or allocation failure; caller owns the result. */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_EDP,
	};
	int i;

	if (INTEL_INFO(dev)->num_pipes == 0)
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);

	for_each_pipe(i) {
		/* Skip pipes whose power domain is off — reading their
		 * registers would be pointless (or hang). */
		error->pipe[i].power_domain_on =
			intel_display_power_enabled_sw(dev, POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		/* Cursor registers moved on IVB+ (except VLV). */
		if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
			error->cursor[i].control = I915_READ(CURCNTR(i));
			error->cursor[i].position = I915_READ(CURPOS(i));
			error->cursor[i].base = I915_READ(CURBASE(i));
		} else {
			error->cursor[i].control = I915_READ(CURCNTR_IVB(i));
			error->cursor[i].position = I915_READ(CURPOS_IVB(i));
			error->cursor[i].base = I915_READ(CURBASE_IVB(i));
		}

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		if (INTEL_INFO(dev)->gen <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_INFO(dev)->gen >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));
	}

	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
	if (HAS_DDI(dev_priv->dev))
		error->num_transcoders++; /* Account for eDP. */

	for (i = 0; i < error->num_transcoders; i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		error->transcoder[i].power_domain_on =
			intel_display_power_enabled_sw(dev,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}
2327 Serge 11458
 
4104 Serge 11459
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
11460
 
3031 serge 11461
void
4104 Serge 11462
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
3031 serge 11463
				struct drm_device *dev,
11464
				struct intel_display_error_state *error)
2332 Serge 11465
{
3031 serge 11466
	int i;
2330 Serge 11467
 
4104 Serge 11468
	if (!error)
11469
		return;
11470
 
11471
	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
4560 Serge 11472
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4104 Serge 11473
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
11474
			   error->power_well_driver);
3031 serge 11475
	for_each_pipe(i) {
4104 Serge 11476
		err_printf(m, "Pipe [%d]:\n", i);
4560 Serge 11477
		err_printf(m, "  Power: %s\n",
11478
			   error->pipe[i].power_domain_on ? "on" : "off");
4104 Serge 11479
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
2332 Serge 11480
 
4104 Serge 11481
		err_printf(m, "Plane [%d]:\n", i);
11482
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
11483
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
3746 Serge 11484
		if (INTEL_INFO(dev)->gen <= 3) {
4104 Serge 11485
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
11486
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
3746 Serge 11487
		}
11488
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
4104 Serge 11489
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
3031 serge 11490
		if (INTEL_INFO(dev)->gen >= 4) {
4104 Serge 11491
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
11492
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
3031 serge 11493
		}
2332 Serge 11494
 
4104 Serge 11495
		err_printf(m, "Cursor [%d]:\n", i);
11496
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
11497
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
11498
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
3031 serge 11499
	}
4104 Serge 11500
 
11501
	for (i = 0; i < error->num_transcoders; i++) {
4560 Serge 11502
		err_printf(m, "CPU transcoder: %c\n",
4104 Serge 11503
			   transcoder_name(error->transcoder[i].cpu_transcoder));
4560 Serge 11504
		err_printf(m, "  Power: %s\n",
11505
			   error->transcoder[i].power_domain_on ? "on" : "off");
4104 Serge 11506
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
11507
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
11508
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
11509
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
11510
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
11511
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
11512
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
11513
	}
2327 Serge 11514
}
3031 serge 11515
#endif