Subversion Repositories Kolibri OS

Rev

Rev 4539 | Rev 4560 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2327 Serge 1
/*
2
 * Copyright © 2006-2007 Intel Corporation
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21
 * DEALINGS IN THE SOFTWARE.
22
 *
23
 * Authors:
24
 *  Eric Anholt 
25
 */
26
 
3746 Serge 27
//#include 
2327 Serge 28
#include 
29
//#include 
30
#include 
31
#include 
2330 Serge 32
#include 
3746 Serge 33
#include 
2342 Serge 34
#include 
3031 serge 35
#include 
2327 Serge 36
#include "intel_drv.h"
3031 serge 37
#include 
2327 Serge 38
#include "i915_drv.h"
2351 Serge 39
#include "i915_trace.h"
3031 serge 40
#include 
41
#include 
42
//#include 
2327 Serge 43
 
4104 Serge 44
#define MAX_ERRNO       4095
2327 Serge 45
phys_addr_t get_bus_addr(void);
46
 
2342 Serge 47
bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
2327 Serge 48
static void intel_increase_pllclock(struct drm_crtc *crtc);
3243 Serge 49
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
2327 Serge 50
 
4104 Serge 51
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
52
				struct intel_crtc_config *pipe_config);
53
static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
54
				    struct intel_crtc_config *pipe_config);
2327 Serge 55
 
4104 Serge 56
static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
57
			  int x, int y, struct drm_framebuffer *old_fb);
58
 
59
 
2327 Serge 60
/* Inclusive [min, max] bounds for one PLL divisor or derived clock value. */
typedef struct {
    int min, max;
} intel_range_t;

/* Post-divider (p2) selection: dot clocks below dot_limit use p2_slow,
 * those at or above it use p2_fast. */
typedef struct {
    int dot_limit;
    int p2_slow, p2_fast;
} intel_p2_t;

typedef struct intel_limit intel_limit_t;
/* Complete set of PLL parameter constraints for one platform/output combo. */
struct intel_limit {
    intel_range_t   dot, vco, n, m, m1, m2, p, p1;
    intel_p2_t      p2;
};

/* FDI */
#define IRONLAKE_FDI_FREQ       2700000 /* in kHz for mode->clock */
77
 
3243 Serge 78
/*
 * Read the PCH raw clock frequency field from the PCH_RAWCLK_FREQ register.
 * Only meaningful on PCH-split platforms; warns if called elsewhere.
 */
int
intel_pch_rawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!HAS_PCH_SPLIT(dev));

	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
}
87
 
2327 Serge 88
static inline u32 /* units of 100MHz */
89
intel_fdi_link_freq(struct drm_device *dev)
90
{
91
	if (IS_GEN5(dev)) {
92
		struct drm_i915_private *dev_priv = dev->dev_private;
93
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
94
	} else
95
		return 27;
96
}
97
 
4104 Serge 98
/* PLL limit tables for gen2 (i8xx) and gen3/4 (i9xx) platforms, one table
 * per output type.  Values come from the platform PLL constraints. */
static const intel_limit_t intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	/* same as DAC except p2_fast (DVO keeps the slow post divider) */
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
162
 
163
 
164
/* PLL limit tables for G4x and Pineview platforms. */
static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	/* dot_limit = 0: the slow/fast split never triggers; both are 14 */
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
247
 
248
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
318
 
3031 serge 319
/* Valleyview PLL limit tables (DAC / HDMI / DP). */
static const intel_limit_t intel_limits_vlv_dac = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m = { .min = 22, .max = 450 }, /* guess */
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3 },
	.p2 = { .dot_limit = 270000,
		.p2_slow = 2, .p2_fast = 20 },
};

static const intel_limit_t intel_limits_vlv_hdmi = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m = { .min = 60, .max = 300 }, /* guess */
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .dot_limit = 270000,
		.p2_slow = 2, .p2_fast = 20 },
};

static const intel_limit_t intel_limits_vlv_dp = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m = { .min = 22, .max = 450 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3 },
	.p2 = { .dot_limit = 270000,
		.p2_slow = 2, .p2_fast = 20 },
};
357
 
2327 Serge 358
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
359
						int refclk)
360
{
361
	struct drm_device *dev = crtc->dev;
362
	const intel_limit_t *limit;
363
 
364
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
3480 Serge 365
		if (intel_is_dual_link_lvds(dev)) {
2327 Serge 366
			if (refclk == 100000)
367
				limit = &intel_limits_ironlake_dual_lvds_100m;
368
			else
369
				limit = &intel_limits_ironlake_dual_lvds;
370
		} else {
371
			if (refclk == 100000)
372
				limit = &intel_limits_ironlake_single_lvds_100m;
373
			else
374
				limit = &intel_limits_ironlake_single_lvds;
375
		}
4104 Serge 376
	} else
2327 Serge 377
		limit = &intel_limits_ironlake_dac;
378
 
379
	return limit;
380
}
381
 
382
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
383
{
384
	struct drm_device *dev = crtc->dev;
385
	const intel_limit_t *limit;
386
 
387
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
3480 Serge 388
		if (intel_is_dual_link_lvds(dev))
2327 Serge 389
			limit = &intel_limits_g4x_dual_channel_lvds;
390
		else
391
			limit = &intel_limits_g4x_single_channel_lvds;
392
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
393
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
394
		limit = &intel_limits_g4x_hdmi;
395
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
396
		limit = &intel_limits_g4x_sdvo;
397
	} else /* The option is for other outputs */
398
		limit = &intel_limits_i9xx_sdvo;
399
 
400
	return limit;
401
}
402
 
403
/*
 * Top-level limit-table dispatch: route by platform generation first,
 * then by the output type attached to @crtc.  Order matters: PCH-split
 * platforms take precedence over the generic gen checks.
 */
static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (HAS_PCH_SPLIT(dev))
		limit = intel_ironlake_limit(crtc, refclk);
	else if (IS_G4X(dev)) {
		limit = intel_g4x_limit(crtc);
	} else if (IS_PINEVIEW(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_pineview_lvds;
		else
			limit = &intel_limits_pineview_sdvo;
	} else if (IS_VALLEYVIEW(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG))
			limit = &intel_limits_vlv_dac;
		else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
			limit = &intel_limits_vlv_hdmi;
		else
			limit = &intel_limits_vlv_dp;
	} else if (!IS_GEN2(dev)) {
		/* gen3/4 without the checks above */
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i9xx_lvds;
		else
			limit = &intel_limits_i9xx_sdvo;
	} else {
		/* gen2 */
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i8xx_lvds;
		else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
			limit = &intel_limits_i8xx_dvo;
		else
			limit = &intel_limits_i8xx_dac;
	}
	return limit;
}
439
 
440
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
	/* Single combined M divisor lives in m2; hardware stores it -2. */
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	/* n is used directly (no +2 bias, unlike i9xx_clock). */
	clock->vco = refclk * clock->m / clock->n;
	clock->dot = clock->vco / clock->p;
}
448
 
4104 Serge 449
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
2327 Serge 450
{
4104 Serge 451
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
452
}
453
 
454
/* Derive effective m/p/vco/dot for i9xx-style PLLs; n is stored biased
 * by -2.  NOTE(review): clock is passed to i9xx_dpll_compute_m(struct
 * dpll *) — intel_clock_t is presumably an alias of struct dpll; confirm
 * in the driver headers. */
static void i9xx_clock(int refclk, intel_clock_t *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / (clock->n + 2);
	clock->dot = clock->vco / clock->p;
}
461
 
462
/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;

	/* Scan every encoder attached to this crtc for a matching type. */
	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->type == type)
			return true;

	return false;
}
476
 
477
/* Bail out of intel_PLL_is_valid() with false; the debug print is
 * compiled out in this port. */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	/* Each divisor and derived clock must fall inside the platform's
	 * limit table; any violation rejects the candidate immediately. */
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
		INTELPllInvalid("p out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");
	/* Pineview has no m1 divider, so the m1 > m2 rule is skipped there. */
	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
		INTELPllInvalid("m1 <= m2\n");
	if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
		INTELPllInvalid("m out of range\n");
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
511
 
512
/*
 * Exhaustive DPLL divisor search for i9xx-style PLLs: scan the
 * m1/m2/n/p1 space and keep the candidate whose dot clock is closest to
 * @target.  If @match_clock is set, only candidates with the same total
 * post divider p are considered.  Returns true if any valid candidate
 * beat the initial error bound (i.e. *best_clock was written).
 */
static bool
i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	int err = target;	/* best absolute error so far; worst case to start */

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* hardware requires m1 > m2 (see intel_PLL_is_valid) */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
572
 
573
/*
 * Pineview variant of the exhaustive DPLL search.  Identical strategy to
 * i9xx_find_best_dpll() but uses pineview_clock() (single combined M
 * divider) and therefore has no m1 > m2 constraint.
 */
static bool
pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	int err = target;	/* best absolute error so far */

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pineview_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
631
 
632
/*
 * G4x DPLL search: instead of minimizing absolute error from scratch it
 * starts from a relative error bound (~0.585% of target) and iterates
 * from the large end of m1/m2/p1, tightening the n bound as matches are
 * found.  @match_clock is unused on this path.  Returns true if any
 * candidate beat the error bound.
 */
static bool
g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* never accept a larger n than this hit */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
688
 
689
/*
 * Valleyview DPLL search: minimizes the ppm deviation of vco/p from the
 * "fast clock" (target/2 in 100kHz units) rather than the raw dot-clock
 * error.  A candidate within 100 ppm that also has a larger total post
 * divider, or one that improves the best ppm by more than 10, replaces
 * the current best.  @match_clock and @crtc are unused here; always
 * returns true (best_clock may be all zeroes if nothing qualified).
 */
static bool
vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock)
{
	u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
	u32 m, n, fastclk;
	u32 updrate, minupdate, p;
	unsigned long bestppm, ppm, absppm;
	int dotclk, flag;

	flag = 0;
	dotclk = target * 1000;
	bestppm = 1000000;
	ppm = absppm = 0;
	fastclk = dotclk / (2*100);
	updrate = 0;
	minupdate = 19200;	/* minimum PLL update rate, in kHz */
	n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
	bestm1 = bestm2 = bestp1 = bestp2 = 0;

	/* based on hardware requirement, prefer smaller n to precision */
	for (n = limit->n.min; n <= ((refclk) / minupdate); n++) {
		updrate = refclk / n;
		for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) {
			for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) {
				if (p2 > 10)
					p2 = p2 - 1;
				p = p1 * p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) {
					/* m2 chosen so vco/p lands nearest fastclk (rounded) */
					m2 = (((2*(fastclk * p * n / m1 )) +
					       refclk) / (2*refclk));
					m = m1 * m2;
					vco = updrate * m;
					if (vco >= limit->vco.min && vco < limit->vco.max) {
						ppm = 1000000 * ((vco / p) - fastclk) / fastclk;
						absppm = (ppm > 0) ? ppm : (-ppm);
						if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) {
							bestppm = 0;
							flag = 1;
						}
						if (absppm < bestppm - 10) {
							bestppm = absppm;
							flag = 1;
						}
						if (flag) {
							bestn = n;
							bestm1 = m1;
							bestm2 = m2;
							bestp1 = p1;
							bestp2 = p2;
							flag = 0;
						}
					}
				}
			}
		}
	}
	best_clock->n = bestn;
	best_clock->m1 = bestm1;
	best_clock->m2 = bestm2;
	best_clock->p1 = bestp1;
	best_clock->p2 = bestp2;

	return true;
}
756
 
3243 Serge 757
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
758
					     enum pipe pipe)
759
{
760
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
761
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
762
 
3746 Serge 763
	return intel_crtc->config.cpu_transcoder;
3243 Serge 764
}
765
 
3031 serge 766
/* Wait for the pipe's frame counter to advance (one vblank) on ILK+;
 * logs a debug message if it doesn't change within 50 ms. */
static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 frame, frame_reg = PIPEFRAME(pipe);

	frame = I915_READ(frame_reg);

	/* NOTRACE read keeps the polling loop out of the register trace. */
	if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}
776
 
2327 Serge 777
/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
 * mode setting code.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipestat_reg = PIPESTAT(pipe);

	/* Gen5+ uses the frame counter instead of PIPESTAT polling. */
	if (INTEL_INFO(dev)->gen >= 5) {
		ironlake_wait_for_vblank(dev, pipe);
		return;
	}

	/* Clear existing vblank status. Note this will clear any other
	 * sticky status fields as well.
	 *
	 * This races with i915_driver_irq_handler() with the result
	 * that either function could miss a vblank event.  Here it is not
	 * fatal, as we will either wait upon the next vblank interrupt or
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
	 * called during modeset at which time the GPU should be idle and
	 * should *not* be performing page flips and thus not waiting on
	 * vblanks...
	 * Currently, the result of us stealing a vblank from the irq
	 * handler is that a single frame will be skipped during swapbuffers.
	 */
	I915_WRITE(pipestat_reg,
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

	/* Wait for vblank interrupt bit to set */
	if (wait_for(I915_READ(pipestat_reg) &
		     PIPE_VBLANK_INTERRUPT_STATUS,
		     50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}
817
 
818
/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		u32 last_line, line_mask;
		int reg = PIPEDSL(pipe);
		/* GetTimerTicks() is this port's jiffies substitute */
		unsigned long timeout = GetTimerTicks() + msecs_to_jiffies(100);

		if (IS_GEN2(dev))
			line_mask = DSL_LINEMASK_GEN2;
		else
			line_mask = DSL_LINEMASK_GEN3;

		/* Wait for the display line to settle */
		do {
			last_line = I915_READ(reg) & line_mask;
			mdelay(5);
		} while (((I915_READ(reg) & line_mask) != last_line) &&
			 time_after(timeout, GetTimerTicks()));
		if (time_after(GetTimerTicks(), timeout))
			WARN(1, "pipe_off wait timed out\n");
	}
}
868
 
3480 Serge 869
/*
 * ibx_digital_port_connected - is the specified port connected?
 * @dev_priv: i915 private structure
 * @port: the port to test
 *
 * Returns true if @port is connected, false otherwise.
 */
bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
				struct intel_digital_port *port)
{
	u32 bit;

	/* IBX vs CPT/PPT PCHs use different hotplug-detect bit layouts
	 * in SDEISR. */
	if (HAS_PCH_IBX(dev_priv->dev)) {
		switch(port->port) {
		case PORT_B:
			bit = SDE_PORTB_HOTPLUG;
			break;
		case PORT_C:
			bit = SDE_PORTC_HOTPLUG;
			break;
		case PORT_D:
			bit = SDE_PORTD_HOTPLUG;
			break;
		default:
			/* Ports without a hotplug bit are reported connected. */
			return true;
		}
	} else {
		switch(port->port) {
		case PORT_B:
			bit = SDE_PORTB_HOTPLUG_CPT;
			break;
		case PORT_C:
			bit = SDE_PORTC_HOTPLUG_CPT;
			break;
		case PORT_D:
			bit = SDE_PORTD_HOTPLUG_CPT;
			break;
		default:
			return true;
		}
	}

	return I915_READ(SDEISR) & bit;
}
913
 
2327 Serge 914
/* Map an enable flag to a human-readable label for assertion messages. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";

	return "off";
}
918
 
919
/* Only for pre-ILK configs */
/* Assert that pipe's DPLL is in the expected on/off state (VCO enable bit). */
void assert_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
934
 
4104 Serge 935
struct intel_shared_dpll *
936
intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
937
{
938
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
939
 
940
	if (crtc->config.shared_dpll < 0)
941
		return NULL;
942
 
943
	return &dev_priv->shared_dplls[crtc->config.shared_dpll];
944
}
945
 
2327 Serge 946
/* For ILK+ */
4104 Serge 947
void assert_shared_dpll(struct drm_i915_private *dev_priv,
948
			       struct intel_shared_dpll *pll,
3031 serge 949
			   bool state)
2327 Serge 950
{
951
	bool cur_state;
4104 Serge 952
	struct intel_dpll_hw_state hw_state;
2327 Serge 953
 
3031 serge 954
	if (HAS_PCH_LPT(dev_priv->dev)) {
955
		DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
956
		return;
957
	}
2342 Serge 958
 
3031 serge 959
	if (WARN (!pll,
4104 Serge 960
		  "asserting DPLL %s with no DPLL\n", state_string(state)))
3031 serge 961
		return;
2342 Serge 962
 
4104 Serge 963
	cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
3031 serge 964
	WARN(cur_state != state,
4104 Serge 965
	     "%s assertion failure (expected %s, current %s)\n",
966
	     pll->name, state_string(state), state_string(cur_state));
2327 Serge 967
}
968
 
969
/* Warn if the FDI transmitter for @pipe is not in the expected @state. */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	bool cur_state;
	u32 val;

	if (HAS_DDI(dev_priv->dev)) {
		/* DDI does not have a specific FDI_TX register; the transcoder
		 * function-control enable bit stands in for it. */
		val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
994
 
995
/* Warn if the FDI receiver for @pipe is not in the expected @state. */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 rx_ctl = I915_READ(FDI_RX_CTL(pipe));
	bool enabled = !!(rx_ctl & FDI_RX_ENABLE);

	WARN(enabled != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(enabled));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1011
 
1012
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1013
				      enum pipe pipe)
1014
{
1015
	int reg;
1016
	u32 val;
1017
 
1018
	/* ILK FDI PLL is always enabled */
1019
	if (dev_priv->info->gen == 5)
1020
		return;
1021
 
3031 serge 1022
	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
3480 Serge 1023
	if (HAS_DDI(dev_priv->dev))
3031 serge 1024
		return;
1025
 
2327 Serge 1026
	reg = FDI_TX_CTL(pipe);
1027
	val = I915_READ(reg);
1028
	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1029
}
1030
 
4104 Serge 1031
/* Warn if the FDI RX PLL for @pipe is not in the expected @state. */
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 rx_ctl = I915_READ(FDI_RX_CTL(pipe));
	bool enabled = !!(rx_ctl & FDI_RX_PLL_ENABLE);

	WARN(enabled != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(enabled));
}
1045
 
1046
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1047
				  enum pipe pipe)
1048
{
1049
	int pp_reg, lvds_reg;
1050
	u32 val;
1051
	enum pipe panel_pipe = PIPE_A;
1052
	bool locked = true;
1053
 
1054
	if (HAS_PCH_SPLIT(dev_priv->dev)) {
1055
		pp_reg = PCH_PP_CONTROL;
1056
		lvds_reg = PCH_LVDS;
1057
	} else {
1058
		pp_reg = PP_CONTROL;
1059
		lvds_reg = LVDS;
1060
	}
1061
 
1062
	val = I915_READ(pp_reg);
1063
	if (!(val & PANEL_POWER_ON) ||
1064
	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
1065
		locked = false;
1066
 
1067
	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
1068
		panel_pipe = PIPE_B;
1069
 
1070
	WARN(panel_pipe == pipe && locked,
1071
	     "panel assertion failure, pipe %c regs locked\n",
1072
	     pipe_name(pipe));
1073
}
1074
 
2342 Serge 1075
/* Warn if @pipe is not in the expected @state (honouring the pipe A quirk). */
void assert_pipe(struct drm_i915_private *dev_priv,
			enum pipe pipe, bool state)
{
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	bool cur_state;

	/* if we need the pipe A quirk it must be always on */
	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
		state = true;

	/* With the power domain off the register is unreadable; treat as off. */
	if (!intel_display_power_enabled(dev_priv->dev,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
		cur_state = false;
	} else {
		u32 conf = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(conf & PIPECONF_ENABLE);
	}

	WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}
1101
 
3031 serge 1102
/* Warn if display plane @plane is not in the expected @state. */
static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	u32 cntr = I915_READ(DSPCNTR(plane));
	bool enabled = !!(cntr & DISPLAY_PLANE_ENABLE);

	WARN(enabled != state,
	     "plane %c assertion failure (expected %s, current %s)\n",
	     plane_name(plane), state_string(state), state_string(enabled));
}
#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)
1119
 
2327 Serge 1120
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1121
				   enum pipe pipe)
1122
{
4104 Serge 1123
	struct drm_device *dev = dev_priv->dev;
2327 Serge 1124
	int reg, i;
1125
	u32 val;
1126
	int cur_pipe;
1127
 
4104 Serge 1128
	/* Primary planes are fixed to pipes on gen4+ */
1129
	if (INTEL_INFO(dev)->gen >= 4) {
3031 serge 1130
		reg = DSPCNTR(pipe);
1131
		val = I915_READ(reg);
1132
		WARN((val & DISPLAY_PLANE_ENABLE),
1133
		     "plane %c assertion failure, should be disabled but not\n",
1134
		     plane_name(pipe));
2327 Serge 1135
		return;
3031 serge 1136
	}
2327 Serge 1137
 
1138
	/* Need to check both planes against the pipe */
4104 Serge 1139
	for_each_pipe(i) {
2327 Serge 1140
		reg = DSPCNTR(i);
1141
		val = I915_READ(reg);
1142
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1143
			DISPPLANE_SEL_PIPE_SHIFT;
1144
		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1145
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
1146
		     plane_name(i), pipe_name(pipe));
1147
	}
1148
}
1149
 
3746 Serge 1150
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1151
				    enum pipe pipe)
1152
{
4104 Serge 1153
	struct drm_device *dev = dev_priv->dev;
3746 Serge 1154
	int reg, i;
1155
	u32 val;
1156
 
4104 Serge 1157
	if (IS_VALLEYVIEW(dev)) {
3746 Serge 1158
	for (i = 0; i < dev_priv->num_plane; i++) {
1159
		reg = SPCNTR(pipe, i);
1160
		val = I915_READ(reg);
1161
		WARN((val & SP_ENABLE),
4104 Serge 1162
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1163
			     sprite_name(pipe, i), pipe_name(pipe));
1164
		}
1165
	} else if (INTEL_INFO(dev)->gen >= 7) {
1166
		reg = SPRCTL(pipe);
1167
		val = I915_READ(reg);
1168
		WARN((val & SPRITE_ENABLE),
1169
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1170
		     plane_name(pipe), pipe_name(pipe));
1171
	} else if (INTEL_INFO(dev)->gen >= 5) {
1172
		reg = DVSCNTR(pipe);
1173
		val = I915_READ(reg);
1174
		WARN((val & DVS_ENABLE),
1175
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1176
		     plane_name(pipe), pipe_name(pipe));
3746 Serge 1177
	}
1178
}
1179
 
2327 Serge 1180
static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1181
{
1182
	u32 val;
1183
	bool enabled;
1184
 
3031 serge 1185
	if (HAS_PCH_LPT(dev_priv->dev)) {
1186
		DRM_DEBUG_DRIVER("LPT does not has PCH refclk, skipping check\n");
1187
		return;
1188
	}
1189
 
2327 Serge 1190
	val = I915_READ(PCH_DREF_CONTROL);
1191
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1192
			    DREF_SUPERSPREAD_SOURCE_MASK));
1193
	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1194
}
1195
 
4104 Serge 1196
static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
2327 Serge 1197
				       enum pipe pipe)
1198
{
1199
	int reg;
1200
	u32 val;
1201
	bool enabled;
1202
 
4104 Serge 1203
	reg = PCH_TRANSCONF(pipe);
2327 Serge 1204
	val = I915_READ(reg);
1205
	enabled = !!(val & TRANS_ENABLE);
1206
	WARN(enabled,
1207
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
1208
	     pipe_name(pipe));
1209
}
1210
 
1211
/* Return true if the DP port whose control value is @val is enabled and
 * routed to @pipe (transcoder select on CPT, pipe select bits otherwise). */
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum pipe pipe, u32 port_sel, u32 val)
{
	if ((val & DP_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
		return (trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) == port_sel;
	}

	return (val & DP_PIPE_MASK) == (pipe << 30);
}
1228
 
1229
/* Return true if the HDMI/SDVO port with control value @val is enabled and
 * routed to @pipe. */
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & SDVO_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev))
		return (val & SDVO_PIPE_SEL_MASK_CPT) == SDVO_PIPE_SEL_CPT(pipe);

	return (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(pipe);
}
1244
 
1245
/* Return true if the LVDS port with control value @val is enabled and routed
 * to @pipe. */
static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & LVDS_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev))
		return (val & PORT_TRANS_SEL_MASK) == PORT_TRANS_SEL_CPT(pipe);

	return (val & LVDS_PIPE_MASK) == LVDS_PIPE(pipe);
}
1260
 
1261
/* Return true if the analog (VGA/ADPA) port with control value @val is
 * enabled and routed to @pipe. */
static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & ADPA_DAC_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev))
		return (val & PORT_TRANS_SEL_MASK) == PORT_TRANS_SEL_CPT(pipe);

	return (val & ADPA_PIPE_SELECT_MASK) == ADPA_PIPE_SELECT(pipe);
}
1275
 
1276
/* Warn if the PCH DP port at @reg is still enabled on @pipe, and flag the
 * IBX transcoder-B routing quirk on disabled ports. */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);

	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));

	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
	     && (val & DP_PIPEB_SELECT),
	     "IBX PCH dp port still using transcoder B\n");
}
1288
 
1289
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1290
				     enum pipe pipe, int reg)
1291
{
1292
	u32 val = I915_READ(reg);
3031 serge 1293
	WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1294
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
2327 Serge 1295
	     reg, pipe_name(pipe));
3031 serge 1296
 
3746 Serge 1297
	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
3031 serge 1298
	     && (val & SDVO_PIPE_B_SELECT),
1299
	     "IBX PCH hdmi port still using transcoder B\n");
2327 Serge 1300
}
1301
 
1302
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1303
				      enum pipe pipe)
1304
{
1305
	int reg;
1306
	u32 val;
1307
 
1308
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1309
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1310
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1311
 
1312
	reg = PCH_ADPA;
1313
	val = I915_READ(reg);
3031 serge 1314
	WARN(adpa_pipe_enabled(dev_priv, pipe, val),
2327 Serge 1315
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
1316
	     pipe_name(pipe));
1317
 
1318
	reg = PCH_LVDS;
1319
	val = I915_READ(reg);
3031 serge 1320
	WARN(lvds_pipe_enabled(dev_priv, pipe, val),
2327 Serge 1321
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
1322
	     pipe_name(pipe));
1323
 
3746 Serge 1324
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1325
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1326
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
2327 Serge 1327
}
1328
 
4104 Serge 1329
/*
 * vlv_enable_pll - program and enable the DPLL for a Valleyview crtc.
 *
 * Writes the precomputed DPLL value from crtc->config, waits for the PLL
 * to report lock, then writes DPLL_MD and repeats the DPLL write for
 * hardware warmup. The pipe must be disabled on entry.
 * NOTE(review): the write/POSTING_READ/udelay ordering is a hardware
 * programming sequence — do not reorder.
 */
static void vlv_enable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config.dpll_hw_state.dpll;

	/* PLL must not be programmed while its pipe is running. */
	assert_pipe_disabled(dev_priv, crtc->pipe);

    /* No really, not for ILK+ */
	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));

    /* PLL is protected by panel, make sure we can write it */
    if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	I915_WRITE(reg, dpll);
	/* POSTING_READ flushes the write before the settle delay. */
	POSTING_READ(reg);
	udelay(150);

	/* 1 ms timeout waiting for the PLL lock bit. */
	if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);

	I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(crtc->pipe));

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}
1366
 
1367
/*
 * i9xx_enable_pll - program and enable the DPLL for a pre-ILK crtc.
 *
 * Writes the precomputed DPLL value from crtc->config, lets the clocks
 * stabilize, then programs the pixel multiplier (DPLL_MD on gen4+, a
 * second DPLL write on older parts) and repeats the DPLL write for
 * hardware warmup. The pipe must be disabled on entry.
 * NOTE(review): the write/POSTING_READ/udelay ordering is a hardware
 * programming sequence — do not reorder.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config.dpll_hw_state.dpll;

	/* PLL must not be programmed while its pipe is running. */
	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* No really, not for ILK+ */
	BUG_ON(dev_priv->info->gen >= 5);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev) && !IS_I830(dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc->config.dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

    /* We do this three times for luck */
	I915_WRITE(reg, dpll);
    POSTING_READ(reg);
    udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
    POSTING_READ(reg);
    udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
    POSTING_READ(reg);
    udelay(150); /* wait for warmup */
}
1412
 
1413
/**
4104 Serge 1414
 * i9xx_disable_pll - disable a PLL
2327 Serge 1415
 * @dev_priv: i915 private structure
1416
 * @pipe: pipe PLL to disable
1417
 *
1418
 * Disable the PLL for @pipe, making sure the pipe is off first.
1419
 *
1420
 * Note!  This is for pre-ILK only.
1421
 */
4104 Serge 1422
static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
2327 Serge 1423
{
1424
	/* Don't disable pipe A or pipe A PLLs if needed */
1425
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1426
		return;
1427
 
1428
	/* Make sure the pipe isn't still relying on us */
1429
	assert_pipe_disabled(dev_priv, pipe);
1430
 
4104 Serge 1431
	I915_WRITE(DPLL(pipe), 0);
1432
	POSTING_READ(DPLL(pipe));
2327 Serge 1433
}
1434
 
4539 Serge 1435
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1436
{
1437
	u32 val = 0;
1438
 
1439
	/* Make sure the pipe isn't still relying on us */
1440
	assert_pipe_disabled(dev_priv, pipe);
1441
 
1442
	/* Leave integrated clock source enabled */
1443
	if (pipe == PIPE_B)
1444
		val = DPLL_INTEGRATED_CRI_CLK_VLV;
1445
	I915_WRITE(DPLL(pipe), val);
1446
	POSTING_READ(DPLL(pipe));
1447
}
1448
 
4104 Serge 1449
void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
3031 serge 1450
{
4104 Serge 1451
	u32 port_mask;
3031 serge 1452
 
4104 Serge 1453
	if (!port)
1454
		port_mask = DPLL_PORTB_READY_MASK;
3243 Serge 1455
	else
4104 Serge 1456
		port_mask = DPLL_PORTC_READY_MASK;
3243 Serge 1457
 
4104 Serge 1458
	if (wait_for((I915_READ(DPLL(0)) & port_mask) == 0, 1000))
1459
		WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
1460
		     'B' + port, I915_READ(DPLL(0)));
3031 serge 1461
}
1462
 
2327 Serge 1463
/**
4104 Serge 1464
 * ironlake_enable_shared_dpll - enable PCH PLL
2327 Serge 1465
 * @dev_priv: i915 private structure
1466
 * @pipe: pipe PLL to enable
1467
 *
1468
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
1469
 * drives the transcoder clock.
1470
 */
4104 Serge 1471
static void ironlake_enable_shared_dpll(struct intel_crtc *crtc)
2327 Serge 1472
{
4104 Serge 1473
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1474
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
2327 Serge 1475
 
3031 serge 1476
	/* PCH PLLs only available on ILK, SNB and IVB */
1477
	BUG_ON(dev_priv->info->gen < 5);
4104 Serge 1478
	if (WARN_ON(pll == NULL))
2342 Serge 1479
		return;
1480
 
3031 serge 1481
	if (WARN_ON(pll->refcount == 0))
1482
		return;
2327 Serge 1483
 
4104 Serge 1484
	DRM_DEBUG_KMS("enable %s (active %d, on? %d)for crtc %d\n",
1485
		      pll->name, pll->active, pll->on,
1486
		      crtc->base.base.id);
3031 serge 1487
 
4104 Serge 1488
	if (pll->active++) {
1489
		WARN_ON(!pll->on);
1490
		assert_shared_dpll_enabled(dev_priv, pll);
3031 serge 1491
		return;
1492
	}
4104 Serge 1493
	WARN_ON(pll->on);
3031 serge 1494
 
4104 Serge 1495
	DRM_DEBUG_KMS("enabling %s\n", pll->name);
1496
	pll->enable(dev_priv, pll);
3031 serge 1497
	pll->on = true;
2327 Serge 1498
}
1499
 
4104 Serge 1500
static void intel_disable_shared_dpll(struct intel_crtc *crtc)
2327 Serge 1501
{
4104 Serge 1502
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1503
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
2327 Serge 1504
 
1505
	/* PCH only available on ILK+ */
1506
	BUG_ON(dev_priv->info->gen < 5);
4104 Serge 1507
	if (WARN_ON(pll == NULL))
3031 serge 1508
	       return;
2327 Serge 1509
 
3031 serge 1510
	if (WARN_ON(pll->refcount == 0))
1511
		return;
2327 Serge 1512
 
4104 Serge 1513
	DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
1514
		      pll->name, pll->active, pll->on,
1515
		      crtc->base.base.id);
2342 Serge 1516
 
3031 serge 1517
	if (WARN_ON(pll->active == 0)) {
4104 Serge 1518
		assert_shared_dpll_disabled(dev_priv, pll);
3031 serge 1519
		return;
1520
	}
2342 Serge 1521
 
4104 Serge 1522
	assert_shared_dpll_enabled(dev_priv, pll);
1523
	WARN_ON(!pll->on);
1524
	if (--pll->active)
2342 Serge 1525
		return;
1526
 
4104 Serge 1527
	DRM_DEBUG_KMS("disabling %s\n", pll->name);
1528
	pll->disable(dev_priv, pll);
3031 serge 1529
	pll->on = false;
2327 Serge 1530
}
1531
 
3243 Serge 1532
/*
 * ironlake_enable_pch_transcoder - enable the PCH transcoder for @pipe.
 *
 * Requires the shared DPLL and both FDI TX/RX to already be enabled.
 * Copies BPC and interlace configuration from PIPECONF into PCH_TRANSCONF,
 * applies the CPT timing-override workaround, then enables the transcoder
 * and waits up to 100 ms for it to report enabled.
 */
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t reg, val, pipeconf_val;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv,
				   intel_crtc_to_shared_dpll(intel_crtc));

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		/* Workaround: Set the timing override bit before enabling the
		 * pch transcoder. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv->dev)) {
		/*
		 * make the BPC in transcoder be consistent with
		 * that in pipeconf reg.
		 */
		val &= ~PIPECONF_BPC_MASK;
		val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	/* Mirror the pipe's interlace mode; IBX SDVO outputs need the
	 * legacy interlace mode. (Note the inner if/else binds to the
	 * outer if — the final else is the progressive case.) */
	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
		if (HAS_PCH_IBX(dev_priv->dev) &&
		    intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}
1587
 
3243 Serge 1588
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1589
				      enum transcoder cpu_transcoder)
1590
{
1591
	u32 val, pipeconf_val;
1592
 
1593
	/* PCH only available on ILK+ */
1594
	BUG_ON(dev_priv->info->gen < 5);
1595
 
1596
	/* FDI must be feeding us bits for PCH ports */
3480 Serge 1597
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
3243 Serge 1598
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
1599
 
1600
	/* Workaround: set timing override bit. */
1601
	val = I915_READ(_TRANSA_CHICKEN2);
1602
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1603
	I915_WRITE(_TRANSA_CHICKEN2, val);
1604
 
1605
	val = TRANS_ENABLE;
1606
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1607
 
1608
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1609
	    PIPECONF_INTERLACED_ILK)
1610
		val |= TRANS_INTERLACED;
1611
	else
1612
		val |= TRANS_PROGRESSIVE;
1613
 
4104 Serge 1614
	I915_WRITE(LPT_TRANSCONF, val);
1615
	if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
3243 Serge 1616
		DRM_ERROR("Failed to enable PCH transcoder\n");
1617
}
1618
 
1619
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
2327 Serge 1620
				     enum pipe pipe)
1621
{
3243 Serge 1622
	struct drm_device *dev = dev_priv->dev;
1623
	uint32_t reg, val;
2327 Serge 1624
 
1625
	/* FDI relies on the transcoder */
1626
	assert_fdi_tx_disabled(dev_priv, pipe);
1627
	assert_fdi_rx_disabled(dev_priv, pipe);
1628
 
1629
	/* Ports must be off as well */
1630
	assert_pch_ports_disabled(dev_priv, pipe);
1631
 
4104 Serge 1632
	reg = PCH_TRANSCONF(pipe);
2327 Serge 1633
	val = I915_READ(reg);
1634
	val &= ~TRANS_ENABLE;
1635
	I915_WRITE(reg, val);
1636
	/* wait for PCH transcoder off, transcoder state */
1637
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
4104 Serge 1638
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
3243 Serge 1639
 
1640
	if (!HAS_PCH_IBX(dev)) {
1641
		/* Workaround: Clear the timing override chicken bit again. */
1642
		reg = TRANS_CHICKEN2(pipe);
1643
		val = I915_READ(reg);
1644
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1645
		I915_WRITE(reg, val);
1646
	}
2327 Serge 1647
}
1648
 
3243 Serge 1649
static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1650
{
1651
	u32 val;
1652
 
4104 Serge 1653
	val = I915_READ(LPT_TRANSCONF);
3243 Serge 1654
	val &= ~TRANS_ENABLE;
4104 Serge 1655
	I915_WRITE(LPT_TRANSCONF, val);
3243 Serge 1656
	/* wait for PCH transcoder off, transcoder state */
4104 Serge 1657
	if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
3243 Serge 1658
		DRM_ERROR("Failed to disable PCH transcoder\n");
1659
 
1660
	/* Workaround: clear timing override bit. */
1661
	val = I915_READ(_TRANSA_CHICKEN2);
1662
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1663
	I915_WRITE(_TRANSA_CHICKEN2, val);
1664
}
1665
 
2327 Serge 1666
/**
1667
 * intel_enable_pipe - enable a pipe, asserting requirements
1668
 * @dev_priv: i915 private structure
1669
 * @pipe: pipe to enable
1670
 * @pch_port: on ILK+, is this pipe driving a PCH port or not
1671
 *
1672
 * Enable @pipe, making sure that various hardware specific requirements
1673
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1674
 *
1675
 * @pipe should be %PIPE_A or %PIPE_B.
1676
 *
1677
 * Will wait until the pipe is actually running (i.e. first vblank) before
1678
 * returning.
1679
 */
1680
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1681
			      bool pch_port)
1682
{
3243 Serge 1683
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1684
								      pipe);
3480 Serge 1685
	enum pipe pch_transcoder;
2327 Serge 1686
	int reg;
1687
	u32 val;
1688
 
4104 Serge 1689
	assert_planes_disabled(dev_priv, pipe);
1690
	assert_sprites_disabled(dev_priv, pipe);
1691
 
3480 Serge 1692
	if (HAS_PCH_LPT(dev_priv->dev))
3243 Serge 1693
		pch_transcoder = TRANSCODER_A;
1694
	else
1695
		pch_transcoder = pipe;
1696
 
2327 Serge 1697
	/*
1698
	 * A pipe without a PLL won't actually be able to drive bits from
1699
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1700
	 * need the check.
1701
	 */
1702
	if (!HAS_PCH_SPLIT(dev_priv->dev))
1703
		assert_pll_enabled(dev_priv, pipe);
1704
	else {
1705
		if (pch_port) {
1706
			/* if driving the PCH, we need FDI enabled */
3243 Serge 1707
			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
3480 Serge 1708
			assert_fdi_tx_pll_enabled(dev_priv,
1709
						  (enum pipe) cpu_transcoder);
2327 Serge 1710
		}
1711
		/* FIXME: assert CPU port conditions for SNB+ */
1712
	}
1713
 
3243 Serge 1714
	reg = PIPECONF(cpu_transcoder);
2327 Serge 1715
	val = I915_READ(reg);
1716
	if (val & PIPECONF_ENABLE)
1717
		return;
1718
 
1719
	I915_WRITE(reg, val | PIPECONF_ENABLE);
1720
	intel_wait_for_vblank(dev_priv->dev, pipe);
1721
}
1722
 
1723
/**
1724
 * intel_disable_pipe - disable a pipe, asserting requirements
1725
 * @dev_priv: i915 private structure
1726
 * @pipe: pipe to disable
1727
 *
1728
 * Disable @pipe, making sure that various hardware specific requirements
1729
 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1730
 *
1731
 * @pipe should be %PIPE_A or %PIPE_B.
1732
 *
1733
 * Will wait until the pipe has shut down before returning.
1734
 */
1735
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1736
			       enum pipe pipe)
1737
{
3243 Serge 1738
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1739
								      pipe);
2327 Serge 1740
	int reg;
1741
	u32 val;
1742
 
3031 serge 1743
    /*
2327 Serge 1744
	 * Make sure planes won't keep trying to pump pixels to us,
1745
	 * or we might hang the display.
1746
	 */
1747
	assert_planes_disabled(dev_priv, pipe);
3746 Serge 1748
	assert_sprites_disabled(dev_priv, pipe);
2327 Serge 1749
 
1750
	/* Don't disable pipe A or pipe A PLLs if needed */
1751
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1752
		return;
1753
 
3243 Serge 1754
	reg = PIPECONF(cpu_transcoder);
2327 Serge 1755
	val = I915_READ(reg);
1756
	if ((val & PIPECONF_ENABLE) == 0)
1757
		return;
1758
 
1759
	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
1760
	intel_wait_for_pipe_off(dev_priv->dev, pipe);
1761
}
1762
 
1763
/*
1764
 * Plane regs are double buffered, going from enabled->disabled needs a
1765
 * trigger in order to latch.  The display address reg provides this.
1766
 */
3031 serge 1767
void intel_flush_display_plane(struct drm_i915_private *dev_priv,
2327 Serge 1768
				      enum plane plane)
1769
{
3243 Serge 1770
	if (dev_priv->info->gen >= 4)
1771
		I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1772
	else
2327 Serge 1773
	I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1774
}
1775
 
1776
/**
1777
 * intel_enable_plane - enable a display plane on a given pipe
1778
 * @dev_priv: i915 private structure
1779
 * @plane: plane to enable
1780
 * @pipe: pipe being fed
1781
 *
1782
 * Enable @plane on @pipe, making sure that @pipe is running first.
1783
 */
1784
static void intel_enable_plane(struct drm_i915_private *dev_priv,
1785
			       enum plane plane, enum pipe pipe)
1786
{
1787
	int reg;
1788
	u32 val;
1789
 
1790
	/* If the pipe isn't enabled, we can't pump pixels and may hang */
1791
	assert_pipe_enabled(dev_priv, pipe);
1792
 
1793
	reg = DSPCNTR(plane);
1794
	val = I915_READ(reg);
1795
	if (val & DISPLAY_PLANE_ENABLE)
1796
		return;
1797
 
1798
	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
1799
	intel_flush_display_plane(dev_priv, plane);
1800
	intel_wait_for_vblank(dev_priv->dev, pipe);
1801
}
1802
 
1803
/**
1804
 * intel_disable_plane - disable a display plane
1805
 * @dev_priv: i915 private structure
1806
 * @plane: plane to disable
1807
 * @pipe: pipe consuming the data
1808
 *
1809
 * Disable @plane; should be an independent operation.
1810
 */
1811
static void intel_disable_plane(struct drm_i915_private *dev_priv,
1812
				enum plane plane, enum pipe pipe)
1813
{
1814
	int reg;
1815
	u32 val;
1816
 
1817
	reg = DSPCNTR(plane);
1818
	val = I915_READ(reg);
1819
	if ((val & DISPLAY_PLANE_ENABLE) == 0)
1820
		return;
1821
 
1822
	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
1823
	intel_flush_display_plane(dev_priv, plane);
3031 serge 1824
    intel_wait_for_vblank(dev_priv->dev, pipe);
2327 Serge 1825
}
1826
 
3746 Serge 1827
static bool need_vtd_wa(struct drm_device *dev)
1828
{
1829
#ifdef CONFIG_INTEL_IOMMU
1830
	if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
1831
		return true;
1832
#endif
1833
	return false;
1834
}
1835
 
2335 Serge 1836
/*
 * Pin a framebuffer object into the GGTT for scan-out and install a
 * fence register for it.
 *
 * @dev: drm device
 * @obj: backing GEM object of the framebuffer
 * @pipelined: ring the pin may be pipelined against (NULL for none)
 *
 * Returns 0 on success or a negative error code; on failure the object
 * is left unpinned.  dev_priv->mm.interruptible is forced off for the
 * duration so the pin cannot be aborted by a signal, and restored to
 * true on every exit path.
 */
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct intel_ring_buffer *pipelined)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 alignment;
	int ret;

	/* Required surface base alignment depends on tiling and GPU gen. */
	switch (obj->tiling_mode) {
	case I915_TILING_NONE:
		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
			alignment = 128 * 1024;
		else if (INTEL_INFO(dev)->gen >= 4)
			alignment = 4 * 1024;
		else
			alignment = 64 * 1024;
		break;
	case I915_TILING_X:
		/* pin() will align the object as required by fence */
		alignment = 0;
		break;
	case I915_TILING_Y:
		/* Despite that we check this in framebuffer_init userspace can
		 * screw us over and change the tiling after the fact. Only
		 * pinned buffers can't change their tiling. */
		DRM_DEBUG_DRIVER("Y tiled not allowed for scan out buffers\n");
		return -EINVAL;
	default:
		BUG();
	}

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/* Non-interruptible: aborting a scan-out pin mid-way is not safe. */
	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
	if (ret)
		goto err_interruptible;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.  For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto err_unpin;

	i915_gem_object_pin_fence(obj);

	dev_priv->mm.interruptible = true;
	return 0;

err_unpin:
	/* Undo the display-plane pin taken above. */
	i915_gem_object_unpin_from_display_plane(obj);
err_interruptible:
	dev_priv->mm.interruptible = true;
	return ret;
}
2327 Serge 1901
 
3031 serge 1902
/*
 * Release the pin/fence taken by intel_pin_and_fence_fb_obj().
 *
 * NOTE(port): intentionally a no-op in this KolibriOS port — the unpin
 * calls below are commented out, so scan-out buffers stay pinned for
 * the lifetime of the driver.  Callers may still invoke this on error
 * paths; it simply does nothing here.
 */
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
{
//	i915_gem_object_unpin_fence(obj);
//	i915_gem_object_unpin(obj);
}
1907
 
1908
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
1909
 * is assumed to be a power-of-two. */
3480 Serge 1910
unsigned long intel_gen4_compute_page_offset(int *x, int *y,
1911
					     unsigned int tiling_mode,
1912
					     unsigned int cpp,
3031 serge 1913
							unsigned int pitch)
1914
{
3480 Serge 1915
	if (tiling_mode != I915_TILING_NONE) {
1916
		unsigned int tile_rows, tiles;
3031 serge 1917
 
1918
	tile_rows = *y / 8;
1919
	*y %= 8;
1920
 
3480 Serge 1921
		tiles = *x / (512/cpp);
1922
		*x %= 512/cpp;
1923
 
3031 serge 1924
	return tile_rows * pitch * 8 + tiles * 4096;
3480 Serge 1925
	} else {
1926
		unsigned int offset;
1927
 
1928
		offset = *y * pitch + *x * cpp;
1929
		*y = 0;
1930
		*x = (offset & 4095) / cpp;
1931
		return offset & -4096;
1932
	}
3031 serge 1933
}
1934
 
2327 Serge 1935
/*
 * Program the primary plane registers for pre-PCH (i9xx-style) display
 * hardware: pixel format, tiling, stride and the new surface base for
 * the given framebuffer at pan position (x, y).
 *
 * Returns 0 on success, -EINVAL for a plane this path cannot drive.
 * Assumes the fb object is already pinned (see intel_pipe_set_base).
 */
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			     int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	u32 reg;

	/* Only planes A and B exist on this hardware path. */
	switch (plane) {
	case 0:
	case 1:
		break;
	default:
		DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	/* Translate the DRM fourcc into the plane's format field. */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* Unreachable: formats are validated at framebuffer creation. */
		BUG();
	}

	/* Gen4+ planes decode tiling themselves; mirror the object state. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (obj->tiling_mode != I915_TILING_NONE)
			dspcntr |= DISPPLANE_TILED;
		else
			dspcntr &= ~DISPPLANE_TILED;
	}

	if (IS_G4X(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	I915_WRITE(reg, dspcntr);

	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);

	/* Gen4+ scans out from a page/tile-aligned base plus (x, y) offsets;
	 * older parts take the full linear byte offset directly. */
	if (INTEL_INFO(dev)->gen >= 4) {
		intel_crtc->dspaddr_offset =
			intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
						       fb->bits_per_pixel / 8,
						       fb->pitches[0]);
		linear_offset -= intel_crtc->dspaddr_offset;
	} else {
		intel_crtc->dspaddr_offset = linear_offset;
	}

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
		      fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		/* DSPSURF write latches all plane registers at next vblank. */
		I915_MODIFY_DISPBASE(DSPSURF(plane),
				     i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	} else
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
	POSTING_READ(reg);

	return 0;
}
2034
 
2035
/*
 * Program the primary plane registers on PCH platforms (Ironlake and
 * later): pixel format, tiling, trickle feed, stride and the new
 * surface base for the given framebuffer at pan position (x, y).
 *
 * Returns 0 on success, -EINVAL for a plane this path cannot drive.
 * Assumes the fb object is already pinned (see intel_pipe_set_base).
 */
static int ironlake_update_plane(struct drm_crtc *crtc,
				 struct drm_framebuffer *fb, int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	u32 reg;

	/* PCH platforms have up to three primary planes (A, B, C). */
	switch (plane) {
	case 0:
	case 1:
	case 2:
		break;
	default:
		DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	/* Translate the DRM fourcc into the plane's format field. */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* Unreachable: formats are validated at framebuffer creation. */
		BUG();
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	/* Trickle feed is disabled on all but Haswell, which wants it on. */
	if (IS_HASWELL(dev))
		dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
	else
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	I915_WRITE(reg, dspcntr);

	/* Scan out from a page/tile-aligned base plus (x, y) offsets. */
	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
	intel_crtc->dspaddr_offset =
		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
					       fb->bits_per_pixel / 8,
					       fb->pitches[0]);
	linear_offset -= intel_crtc->dspaddr_offset;

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
		      fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	/* DSPSURF write latches all plane registers at next vblank. */
	I915_MODIFY_DISPBASE(DSPSURF(plane),
			     i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	if (IS_HASWELL(dev)) {
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
	} else {
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	}
	POSTING_READ(reg);

	return 0;
}
2127
 
2128
/* Assume fb object is pinned & idle & fenced and just update base pointers */
2129
static int
2130
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2131
			   int x, int y, enum mode_set_atomic state)
2132
{
2133
	struct drm_device *dev = crtc->dev;
2134
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 2135
 
2136
	if (dev_priv->display.disable_fbc)
2137
		dev_priv->display.disable_fbc(dev);
2138
	intel_increase_pllclock(crtc);
2139
 
2140
	return dev_priv->display.update_plane(crtc, fb, x, y);
2141
}
2142
 
2143
#if 0
4104 Serge 2144
void intel_display_handle_reset(struct drm_device *dev)
2145
{
2146
	struct drm_i915_private *dev_priv = dev->dev_private;
2147
	struct drm_crtc *crtc;
2148
 
2149
	/*
2150
	 * Flips in the rings have been nuked by the reset,
2151
	 * so complete all pending flips so that user space
2152
	 * will get its events and not get stuck.
2153
	 *
2154
	 * Also update the base address of all primary
2155
	 * planes to the the last fb to make sure we're
2156
	 * showing the correct fb after a reset.
2157
	 *
2158
	 * Need to make two loops over the crtcs so that we
2159
	 * don't try to grab a crtc mutex before the
2160
	 * pending_flip_queue really got woken up.
2161
	 */
2162
 
2163
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2164
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2165
		enum plane plane = intel_crtc->plane;
2166
 
2167
		intel_prepare_page_flip(dev, plane);
2168
		intel_finish_page_flip_plane(dev, plane);
2169
	}
2170
 
2171
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2172
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2173
 
2174
		mutex_lock(&crtc->mutex);
2175
		if (intel_crtc->active)
2176
			dev_priv->display.update_plane(crtc, crtc->fb,
2177
						       crtc->x, crtc->y);
2178
		mutex_unlock(&crtc->mutex);
2179
	}
2180
}
2181
 
3031 serge 2182
static int
2183
intel_finish_fb(struct drm_framebuffer *old_fb)
2184
{
2185
	struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
2186
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2187
	bool was_interruptible = dev_priv->mm.interruptible;
2327 Serge 2188
	int ret;
2189
 
3031 serge 2190
	/* Big Hammer, we also need to ensure that any pending
2191
	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2192
	 * current scanout is retired before unpinning the old
2193
	 * framebuffer.
2194
	 *
2195
	 * This should only fail upon a hung GPU, in which case we
2196
	 * can safely continue.
2197
	 */
2198
	dev_priv->mm.interruptible = false;
2199
	ret = i915_gem_object_finish_gpu(obj);
2200
	dev_priv->mm.interruptible = was_interruptible;
2327 Serge 2201
 
3031 serge 2202
	return ret;
2327 Serge 2203
}
4104 Serge 2204
 
2205
static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
2206
{
2207
	struct drm_device *dev = crtc->dev;
2208
	struct drm_i915_master_private *master_priv;
2209
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2210
 
2211
	if (!dev->primary->master)
2212
		return;
2213
 
2214
	master_priv = dev->primary->master->driver_priv;
2215
	if (!master_priv->sarea_priv)
2216
		return;
2217
 
2218
	switch (intel_crtc->pipe) {
2219
	case 0:
2220
		master_priv->sarea_priv->pipeA_x = x;
2221
		master_priv->sarea_priv->pipeA_y = y;
2222
		break;
2223
	case 1:
2224
		master_priv->sarea_priv->pipeB_x = x;
2225
		master_priv->sarea_priv->pipeB_y = y;
2226
		break;
2227
	default:
2228
		break;
2229
	}
2230
}
3031 serge 2231
#endif
2327 Serge 2232
 
2233
/*
 * Set a new framebuffer as the scan-out source for @crtc at pan
 * position (x, y): pin+fence the new fb, program the plane via the
 * per-platform update_plane hook, then release the old fb.
 *
 * Returns 0 on success (including the no-fb case, which only logs),
 * or a negative error code.  All GEM work happens under
 * dev->struct_mutex, which is released on every exit path.
 */
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		    struct drm_framebuffer *fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_framebuffer *old_fb;
	int ret;

	/* no fb bound */
	if (!fb) {
		DRM_ERROR("No FB bound\n");
		return 0;
	}

	if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
			  plane_name(intel_crtc->plane),
			  INTEL_INFO(dev)->num_pipes);
		return -EINVAL;
	}

	mutex_lock(&dev->struct_mutex);
	/* Pin the new fb first so it is resident before we point the
	 * hardware at it. */
	ret = intel_pin_and_fence_fb_obj(dev,
					 to_intel_framebuffer(fb)->obj,
					 NULL);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		DRM_ERROR("pin & fence failed\n");
		return ret;
	}

	/* Update pipe size and adjust fitter if needed */
	if (i915_fastboot) {
		I915_WRITE(PIPESRC(intel_crtc->pipe),
			   ((crtc->mode.hdisplay - 1) << 16) |
			   (crtc->mode.vdisplay - 1));
		/* Disable the panel fitter if it was on and the BIOS left
		 * it configured for an LVDS/eDP output we now drive 1:1. */
		if (!intel_crtc->config.pch_pfit.enabled &&
		    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
		     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
			I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
			I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
			I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
		}
	}

	ret = dev_priv->display.update_plane(crtc, fb, x, y);
	if (ret) {
		/* Hardware still scans out the old fb; drop our new pin. */
		intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
		mutex_unlock(&dev->struct_mutex);
		DRM_ERROR("failed to update base address\n");
		return ret;
	}

	old_fb = crtc->fb;
	crtc->fb = fb;
	crtc->x = x;
	crtc->y = y;

	if (old_fb) {
		/* Wait for the flip to latch before unpinning the old fb,
		 * otherwise we could unmap memory still being scanned out. */
		if (intel_crtc->active && old_fb != fb)
			intel_wait_for_vblank(dev, intel_crtc->pipe);
		intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
	}

	intel_update_fbc(dev);
	intel_edp_psr_update(dev);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
2305
 
2306
/*
 * Switch the FDI link out of training patterns into the normal pixel
 * stream, enabling enhanced framing on both TX and RX ends.  Must be
 * called after link training has completed on @crtc's pipe.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		/* IVB uses a different "none" (normal) train encoding. */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
2346
 
4280 Serge 2347
static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
4104 Serge 2348
{
4280 Serge 2349
	return crtc->base.enabled && crtc->active &&
2350
		crtc->config.has_pch_encoder;
4104 Serge 2351
}
2352
 
3243 Serge 2353
/*
 * Ivybridge global modeset hook: when neither pipe B nor pipe C drives
 * a PCH encoder any longer, clear the FDI B/C bifurcation so FDI B can
 * later be enabled with all lanes.
 */
static void ivb_modeset_global_resources(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *pipe_B_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
	struct intel_crtc *pipe_C_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
	uint32_t temp;

	/*
	 * When everything is off disable fdi C so that we could enable fdi B
	 * with all lanes. Note that we don't care about enabled pipes without
	 * an enabled pch encoder.
	 */
	if (!pipe_has_enabled_pch(pipe_B_crtc) &&
	    !pipe_has_enabled_pch(pipe_C_crtc)) {
		/* Both FDI receivers must already be off at this point. */
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

		temp = I915_READ(SOUTH_CHICKEN1);
		temp &= ~FDI_BC_BIFURCATION_SELECT;
		DRM_DEBUG_KMS("disabling fdi C rx\n");
		I915_WRITE(SOUTH_CHICKEN1, temp);
	}
}
2378
 
2379
/* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Train the FDI link for @crtc's pipe: training pattern 1 (bit lock)
 * then pattern 2 (symbol lock), polling FDI_RX_IIR up to 5 times per
 * phase.  Failures are logged but not returned — the caller proceeds
 * regardless, matching hardware bring-up expectations.
 */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 reg, temp, tries;

	/* FDI needs bits from pipe & plane first */
	assert_pipe_enabled(dev_priv, pipe);
	assert_plane_enabled(dev_priv, plane);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll for bit lock (training phase 1 complete). */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			/* Write-1-to-clear the lock status bit. */
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll for symbol lock (training phase 2 complete). */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
2473
 
2342 Serge 2474
/* Voltage-swing / pre-emphasis levels tried in order during SNB-B (and
 * IVB manual) FDI link training, from lowest to highest drive. */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
2480
 
2481
/* The FDI link training functions for SNB/Cougarpoint. */
/*
 * Train the FDI link on Sandybridge: for each training phase, step
 * through the voltage/emphasis table (snb_b_fdi_train_param), polling
 * FDI_RX_IIR up to 5 times per level.  Failures are logged but not
 * returned.
 */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Phase 1: step vswing/emphasis levels until bit lock is seen. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* Write-1-to-clear the lock status bit. */
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		/* retry < 5 means the inner loop broke out on lock. */
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Phase 2: step vswing/emphasis levels until symbol lock is seen. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
2612
 
2613
/* Manual link training for Ivy Bridge A0 parts */
/*
 * Train the FDI link by hand on IVB: each voltage/emphasis level from
 * snb_b_fdi_train_param is tried twice (outer loop over j, level =
 * j/2).  For every attempt TX/RX are disabled first, re-enabled with
 * pattern 1, and bit lock then symbol lock are polled; symbol lock
 * jumps straight to train_done.  Failures are logged but not returned.
 */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i, j;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* j/2: each drive level is attempted twice. */
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		/* Poll for bit lock; double-read guards a racy status bit. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		/* Poll for symbol lock; success ends training entirely. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
2731
 
3031 serge 2732
/* Power up the FDI RX and CPU FDI TX PLLs for Ironlake-class PCH output.
 * Must run before the CPU pipe is enabled; each PLL write is flushed with
 * POSTING_READ and followed by the documented warmup delay. */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	u32 reg, temp;


	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* clear lane-count and BPC fields, then program lanes from the
	 * crtc config and mirror the BPC currently set in PIPECONF */
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
2768
 
3031 serge 2769
/* Reverse of ironlake_fdi_pll_enable(): switch the RX back to the raw
 * clock, then shut down the TX and RX PLLs with the required settle
 * delays after each flush. */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Disable the PCH-side RX PLL last */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
2797
 
2327 Serge 2798
/* Disable the FDI link: stop CPU TX and PCH RX, apply the IBX clock
 * pointer workaround, and leave both ends parked in train pattern 1 so
 * a later re-train starts from a known state. */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* keep the RX BPC field in sync with PIPECONF while disabling */
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev)) {
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	}

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* CPT PCHs use a different train-pattern field than IBX */
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
2850
 
3031 serge 2851
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2327 Serge 2852
{
3031 serge 2853
	struct drm_device *dev = crtc->dev;
2327 Serge 2854
	struct drm_i915_private *dev_priv = dev->dev_private;
3480 Serge 2855
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031 serge 2856
	unsigned long flags;
2857
	bool pending;
2327 Serge 2858
 
3480 Serge 2859
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
2860
	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
3031 serge 2861
		return false;
2327 Serge 2862
 
3031 serge 2863
	spin_lock_irqsave(&dev->event_lock, flags);
2864
	pending = to_intel_crtc(crtc)->unpin_work != NULL;
2865
	spin_unlock_irqrestore(&dev->event_lock, flags);
2866
 
2867
	return pending;
2327 Serge 2868
}
2869
 
3031 serge 2870
#if 0
2327 Serge 2871
/* Block until all pending page flips on this crtc have completed, then
 * finish rendering to the current fb. Currently compiled out (#if 0) in
 * this port — the callers invoke it via commented-out stubs. */
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* No fb bound means nothing to flip or finish */
	if (crtc->fb == NULL)
		return;

	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));

	wait_event(dev_priv->pending_flip_queue,
		   !intel_crtc_has_pending_flip(crtc));

	mutex_lock(&dev->struct_mutex);
	intel_finish_fb(crtc->fb);
	mutex_unlock(&dev->struct_mutex);
}
3031 serge 2888
#endif
2327 Serge 2889
 
3031 serge 2890
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* all SBI sideband traffic below is serialized by dpio_lock */
	mutex_lock(&dev_priv->dpio_lock);

	/* It is necessary to ungate the pixclk gate prior to programming
	 * the divisors, and gate it back when it is done.
	 */
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	/* Disable SSCCTL */
	intel_sbi_write(dev_priv, SBI_SSCCTL6,
			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
				SBI_SSCCTL_DISABLE,
			SBI_ICLK);

	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
	if (crtc->mode.clock == 20000) {
		auxdiv = 1;
		divsel = 0x41;
		phaseinc = 0x20;
	} else {
		/* The iCLK virtual clock root frequency is in MHz,
		 * but the crtc->mode.clock is in KHz. To get the divisors,
		 * it is necessary to divide one by another, so we
		 * convert the virtual clock precision to KHz here for higher
		 * precision.
		 */
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor, msb_divisor_value, pi_value;

		/* split the divisor into an integer part and a
		 * phase-increment (fractional) part */
		desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
		msb_divisor_value = desired_divisor / iclk_pi_range;
		pi_value = desired_divisor % iclk_pi_range;

		auxdiv = 0;
		divsel = msb_divisor_value - 2;
		phaseinc = pi_value;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			crtc->mode.clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);

	mutex_unlock(&dev_priv->dpio_lock);
}
2977
 
4104 Serge 2978
/* Copy the programmed CPU transcoder timings (H/V total, blank, sync,
 * sync shift) into the matching PCH transcoder registers so both sides
 * of the FDI link agree on the mode. */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
3001
 
4280 Serge 3002
/* Set FDI_BC_BIFURCATION_SELECT in SOUTH_CHICKEN1 so FDI lanes are split
 * between pipes B and C. No-op if already set; warns if either RX is
 * still enabled, since the bit must not change under an active link. */
static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (temp & FDI_BC_BIFURCATION_SELECT)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp |= FDI_BC_BIFURCATION_SELECT;
	DRM_DEBUG_KMS("enabling fdi C rx\n");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}
3019
 
3020
/* Decide per-pipe whether FDI B/C lane bifurcation is needed on IVB:
 * pipe A never needs it, pipe B needs it only when using <= 2 lanes
 * (otherwise it must NOT be set), and pipe C always needs it. */
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	switch (intel_crtc->pipe) {
	case PIPE_A:
		break;
	case PIPE_B:
		if (intel_crtc->config.fdi_lanes > 2)
			WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
		else
			cpt_enable_fdi_bc_bifurcation(dev);

		break;
	case PIPE_C:
		cpt_enable_fdi_bc_bifurcation(dev);

		break;
	default:
		BUG();
	}
}
3043
 
2327 Serge 3044
/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev))
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev)) {
		u32 sel;

		/* route shared DPLL A or B to this transcoder */
		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (intel_crtc->config.shared_dpll == DPLL_ID_PCH_PLL_B)
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	ironlake_enable_shared_dpll(intel_crtc);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) &&
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= (TRANS_DP_OUTPUT_ENABLE |
			 TRANS_DP_ENH_FRAMING);
		temp |= bpc << 9; /* same format but at 11:9 */

		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* select which DP port feeds this transcoder */
		switch (intel_trans_dp_port_sel(crtc)) {
		case PCH_DP_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			BUG();
		}

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(dev_priv, pipe);
}
3141
 
3243 Serge 3142
/* LPT (Lynx Point) variant of the PCH enable sequence: program the
 * iCLKIP clock, copy the timings to the (single) PCH transcoder A, and
 * turn the transcoder on. LPT has only transcoder A on the PCH side. */
static void lpt_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);

	lpt_program_iclkip(crtc);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
3158
 
4104 Serge 3159
static void intel_put_shared_dpll(struct intel_crtc *crtc)
3031 serge 3160
{
4104 Serge 3161
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3031 serge 3162
 
3163
	if (pll == NULL)
3164
		return;
3165
 
3166
	if (pll->refcount == 0) {
4104 Serge 3167
		WARN(1, "bad %s refcount\n", pll->name);
3031 serge 3168
		return;
3169
	}
3170
 
4104 Serge 3171
	if (--pll->refcount == 0) {
3172
		WARN_ON(pll->on);
3173
		WARN_ON(pll->active);
3174
	}
3175
 
3176
	crtc->config.shared_dpll = DPLL_ID_PRIVATE;
3031 serge 3177
}
3178
 
4104 Serge 3179
static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
3031 serge 3180
{
4104 Serge 3181
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
3182
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3183
	enum intel_dpll_id i;
3031 serge 3184
 
3185
	if (pll) {
4104 Serge 3186
		DRM_DEBUG_KMS("CRTC:%d dropping existing %s\n",
3187
			      crtc->base.base.id, pll->name);
3188
		intel_put_shared_dpll(crtc);
3031 serge 3189
	}
3190
 
3191
	if (HAS_PCH_IBX(dev_priv->dev)) {
3192
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
4104 Serge 3193
		i = (enum intel_dpll_id) crtc->pipe;
3194
		pll = &dev_priv->shared_dplls[i];
3031 serge 3195
 
4104 Serge 3196
		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
3197
			      crtc->base.base.id, pll->name);
3031 serge 3198
 
3199
		goto found;
3200
	}
3201
 
4104 Serge 3202
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3203
		pll = &dev_priv->shared_dplls[i];
3031 serge 3204
 
3205
		/* Only want to check enabled timings first */
3206
		if (pll->refcount == 0)
3207
			continue;
3208
 
4104 Serge 3209
		if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
3210
			   sizeof(pll->hw_state)) == 0) {
3211
			DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, ative %d)\n",
3212
				      crtc->base.base.id,
3213
				      pll->name, pll->refcount, pll->active);
3031 serge 3214
 
3215
			goto found;
3216
		}
3217
	}
3218
 
3219
	/* Ok no matching timings, maybe there's a free one? */
4104 Serge 3220
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3221
		pll = &dev_priv->shared_dplls[i];
3031 serge 3222
		if (pll->refcount == 0) {
4104 Serge 3223
			DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
3224
				      crtc->base.base.id, pll->name);
3031 serge 3225
			goto found;
3226
		}
3227
	}
3228
 
3229
	return NULL;
3230
 
3231
found:
4104 Serge 3232
	crtc->config.shared_dpll = i;
3233
	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
3234
			 pipe_name(crtc->pipe));
3235
 
3236
	if (pll->active == 0) {
3237
		memcpy(&pll->hw_state, &crtc->config.dpll_hw_state,
3238
		       sizeof(pll->hw_state));
3239
 
3240
		DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
3241
		WARN_ON(pll->on);
3242
		assert_shared_dpll_disabled(dev_priv, pll);
3243
 
3244
		pll->mode_set(dev_priv, pll);
3245
	}
3031 serge 3246
	pll->refcount++;
3247
 
3248
	return pll;
3249
}
3250
 
4104 Serge 3251
/* Sanity-check after a CPT modeset: sample the pipe's display scanline
 * register and verify it advances (i.e. the pipe is actually running).
 * Two 5ms waits before declaring the pipe stuck. */
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}
3264
 
4104 Serge 3265
/* Enable the panel fitter with the position/size stored in the crtc
 * config, if the config requests it. IVB/HSW additionally need the pipe
 * select encoded into PF_CTL. */
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	if (crtc->config.pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
						 PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size);
	}
}
3285
 
3286
static void intel_enable_planes(struct drm_crtc *crtc)
3287
{
3288
	struct drm_device *dev = crtc->dev;
3289
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3290
	struct intel_plane *intel_plane;
3291
 
3292
	list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head)
3293
		if (intel_plane->pipe == pipe)
3294
			intel_plane_restore(&intel_plane->base);
3295
}
3296
 
3297
static void intel_disable_planes(struct drm_crtc *crtc)
3298
{
3299
	struct drm_device *dev = crtc->dev;
3300
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3301
	struct intel_plane *intel_plane;
3302
 
3303
	list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head)
3304
		if (intel_plane->pipe == pipe)
3305
			intel_plane_disable(&intel_plane->base);
3306
}
3307
 
2327 Serge 3308
/* Full crtc enable sequence for Ironlake-class hardware: underrun
 * reporting, watermarks, encoder pre-enable, FDI PLL (for PCH outputs),
 * panel fitter, LUT, pipe + planes + cursor, PCH resources, FBC, and
 * finally encoder enable. Statement order follows the hw enable spec. */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;

	WARN_ON(!crtc->enabled);

    /* idempotent: nothing to do if already running */
    if (intel_crtc->active)
        return;

    intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev, pipe, true);

    intel_update_watermarks(dev);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	if (intel_crtc->config.has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(intel_crtc);

    /*
     * On ILK+ LUT must be loaded before the pipe is running but with
     * clocks enabled
     */
    intel_crtc_load_lut(crtc);

	intel_enable_pipe(dev_priv, pipe,
			  intel_crtc->config.has_pch_encoder);
    intel_enable_plane(dev_priv, plane, pipe);
	intel_enable_planes(crtc);
	intel_crtc_update_cursor(crtc, true);

	if (intel_crtc->config.has_pch_encoder)
        ironlake_pch_enable(crtc);

    mutex_lock(&dev->struct_mutex);
    intel_update_fbc(dev);
    mutex_unlock(&dev->struct_mutex);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	if (HAS_PCH_CPT(dev))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	/*
	 * There seems to be a race in PCH platform hw (at least on some
	 * outputs) where an enabled pipe still completes any pageflip right
	 * away (as if the pipe is off) instead of waiting for vblank. As soon
	 * as the first vblank happened, everything works as expected. Hence just
	 * wait for one vblank before returning to avoid strange things
	 * happening.
	 */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
}
3380
 
4104 Serge 3381
/* IPS only exists on ULT machines and is tied to pipe A. */
3382
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
3383
{
3384
	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
3385
}
3386
 
3387
/* Turn on Intermediate Pixel Storage if the crtc config requests it.
 * Safe only once the plane is enabled and a vblank has passed (the
 * callers guarantee that ordering). */
static void hsw_enable_ips(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

	if (!crtc->config.ips_enabled)
		return;

	/* We can only enable IPS after we enable a plane and wait for a vblank.
	 * We guarantee that the plane is enabled by calling intel_enable_ips
	 * only after intel_enable_plane. And intel_enable_plane already waits
	 * for a vblank, so all we need to do here is to enable the IPS bit. */
	assert_plane_enabled(dev_priv, crtc->plane);
	I915_WRITE(IPS_CTL, IPS_ENABLE);
}
3401
 
3402
/* Turn off IPS (if it was configured on) and wait one vblank, which is
 * required before the plane itself may be disabled. */
static void hsw_disable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config.ips_enabled)
		return;

	assert_plane_enabled(dev_priv, crtc->plane);
	I915_WRITE(IPS_CTL, 0);

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev, crtc->pipe);
}
3416
 
3243 Serge 3417
/* Haswell crtc enable sequence. Differs from the Ironlake path: FDI is
 * trained before the encoders' pre_enable hooks, the DDI pipe clock and
 * transcoder function are programmed explicitly, IPS is enabled, and the
 * LPT PCH path is used. Statement order follows the hw enable spec. */
static void haswell_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	WARN_ON(!crtc->enabled);

	/* idempotent: nothing to do if already running */
	if (intel_crtc->active)
		return;

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
	if (intel_crtc->config.has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);

	intel_update_watermarks(dev);

	if (intel_crtc->config.has_pch_encoder)
		dev_priv->display.fdi_link_train(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	intel_ddi_enable_pipe_clock(intel_crtc);

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_ddi_set_pipe_settings(crtc);
	intel_ddi_enable_transcoder_func(crtc);

	intel_enable_pipe(dev_priv, pipe,
			  intel_crtc->config.has_pch_encoder);
	intel_enable_plane(dev_priv, plane, pipe);
	intel_enable_planes(crtc);
	intel_crtc_update_cursor(crtc, true);

	hsw_enable_ips(intel_crtc);

	if (intel_crtc->config.has_pch_encoder)
		lpt_pch_enable(crtc);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	/*
	 * There seems to be a race in PCH platform hw (at least on some
	 * outputs) where an enabled pipe still completes any pageflip right
	 * away (as if the pipe is off) instead of waiting for vblank. As soon
	 * as the first vblank happened, everything works as expected. Hence just
	 * wait for one vblank before returning to avoid strange things
	 * happening.
	 */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
}
3487
 
4104 Serge 3488
/* Disable the panel fitter, but only if the config says it is in use. */
static void ironlake_pfit_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right. */
	if (crtc->config.pch_pfit.enabled) {
		I915_WRITE(PF_CTL(pipe), 0);
		I915_WRITE(PF_WIN_POS(pipe), 0);
		I915_WRITE(PF_WIN_SZ(pipe), 0);
	}
}
3502
 
2327 Serge 3503
/* Full crtc disable sequence for Ironlake-class hardware: encoders off,
 * FBC/cursor/planes off, pipe off, pfit off, then (for PCH outputs) FDI,
 * PCH transcoder, TRANS_DP_CTL, DPLL_SEL and the shared DPLL itself.
 * NOTE: indentation inside the has_pch_encoder block is historically
 * inconsistent in this port; the braces are what define the structure. */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;
    u32 reg, temp;


    if (!intel_crtc->active)
        return;

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

/* pending-flip wait and vblank-off are stubbed out in this port: */
//    intel_crtc_wait_for_pending_flips(crtc);
//    drm_vblank_off(dev, pipe);

	if (dev_priv->fbc.plane == plane)
		intel_disable_fbc(dev);

	intel_crtc_update_cursor(crtc, false);
	intel_disable_planes(crtc);
    intel_disable_plane(dev_priv, plane, pipe);

	/* suppress spurious PCH underrun reports while tearing down */
	if (intel_crtc->config.has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev, pipe, false);

    intel_disable_pipe(dev_priv, pipe);

	ironlake_pfit_disable(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (intel_crtc->config.has_pch_encoder) {
    ironlake_fdi_disable(crtc);

	ironlake_disable_pch_transcoder(dev_priv, pipe);
		intel_set_pch_fifo_underrun_reporting(dev, pipe, true);

    if (HAS_PCH_CPT(dev)) {
        /* disable TRANS_DP_CTL */
        reg = TRANS_DP_CTL(pipe);
        temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
        temp |= TRANS_DP_PORT_SEL_NONE;
        I915_WRITE(reg, temp);

        /* disable DPLL_SEL */
        temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
        I915_WRITE(PCH_DPLL_SEL, temp);
    }

    /* disable PCH DPLL */
		intel_disable_shared_dpll(intel_crtc);

	ironlake_fdi_pll_disable(intel_crtc);
	}

    intel_crtc->active = false;
    intel_update_watermarks(dev);

    mutex_lock(&dev->struct_mutex);
    intel_update_fbc(dev);
    mutex_unlock(&dev->struct_mutex);
}
3575
 
3243 Serge 3576
/* Haswell crtc disable sequence: encoders, FBC, IPS, cursor/planes,
 * pipe, DDI transcoder function, pfit, DDI pipe clock, post-disable
 * hooks, then the LPT PCH transcoder and FDI for PCH outputs. */
static void haswell_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;

	if (!intel_crtc->active)
		return;

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);


	/* FBC must be disabled before disabling the plane on HSW. */
	if (dev_priv->fbc.plane == plane)
		intel_disable_fbc(dev);

	hsw_disable_ips(intel_crtc);

	intel_crtc_update_cursor(crtc, false);
	intel_disable_planes(crtc);
	intel_disable_plane(dev_priv, plane, pipe);

	/* suppress spurious PCH underrun reports while tearing down */
	if (intel_crtc->config.has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
	intel_disable_pipe(dev_priv, pipe);

	intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

	ironlake_pfit_disable(intel_crtc);

	intel_ddi_disable_pipe_clock(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (intel_crtc->config.has_pch_encoder) {
		lpt_disable_pch_transcoder(dev_priv);
		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
		intel_ddi_fdi_disable(crtc);
	}

	intel_crtc->active = false;
	intel_update_watermarks(dev);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);
}
3630
 
3031 serge 3631
/* ->off() hook for ILK-class crtcs: drop the crtc's shared DPLL reference. */
static void ironlake_crtc_off(struct drm_crtc *crtc)
{
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	intel_put_shared_dpll(intel_crtc);
}
3636
 
3243 Serge 3637
/* ->off() hook for HSW: release the crtc's DDI PLL. */
static void haswell_crtc_off(struct drm_crtc *crtc)
{
	intel_ddi_put_crtc_pll(crtc);
}
3641
 
2327 Serge 3642
/*
 * Turn the overlay scaler off when the crtc is being disabled. The actual
 * switch-off call is stubbed out in this port (see commented line); the
 * mm.interruptible dance would otherwise make the wait uninterruptible.
 */
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
	if (!enable && intel_crtc->overlay) {
		struct drm_device *dev = intel_crtc->base.dev;
		struct drm_i915_private *dev_priv = dev->dev_private;

		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.interruptible = false;
//       (void) intel_overlay_switch_off(intel_crtc->overlay);
		dev_priv->mm.interruptible = true;
		mutex_unlock(&dev->struct_mutex);
	}

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}
3659
 
3480 Serge 3660
/**
 * g4x_fixup_plane - ugly workaround for G45 to fire up the hardware
 * cursor plane briefly if not already running after enabling the display
 * plane.
 * This workaround avoids occasional blank screens when self refresh is
 * enabled.
 */
static void
g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 cntl = I915_READ(CURCNTR(pipe));

	/* Only needed when the cursor plane is currently off. */
	if ((cntl & CURSOR_MODE) == 0) {
		u32 fw_bcl_self = I915_READ(FW_BLC_SELF);

		/* Temporarily drop self-refresh while pulsing the cursor. */
		I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
		I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
		intel_wait_for_vblank(dev_priv->dev, pipe);
		/* Restore the original cursor mode and re-latch the base. */
		I915_WRITE(CURCNTR(pipe), cntl);
		I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
		I915_WRITE(FW_BLC_SELF, fw_bcl_self);
	}
}
3683
 
4104 Serge 3684
/*
 * Program the gen2-4 (GMCH) panel fitter from the precomputed pipe config.
 * Must be called before the pipe is enabled.
 */
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_config *pipe_config = &crtc->config;

	/* Nothing to program if the fitter is not requested. */
	if (!crtc->config.gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc->pipe);

	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
3707
 
3708
/*
 * Bring up a ValleyView pipe: pre-PLL encoder hooks, PLL, pre-enable
 * hooks, pfit, LUT, then pipe/planes/cursor, and finally encoder enable.
 * No-op if the crtc is already active.
 */
static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	WARN_ON(!crtc->enabled);

	if (intel_crtc->active)
		return;

	intel_crtc->active = true;
	intel_update_watermarks(dev);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);

	vlv_enable_pll(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	intel_enable_pipe(dev_priv, pipe, false);
	intel_enable_plane(dev_priv, plane, pipe);
	intel_enable_planes(crtc);
	intel_crtc_update_cursor(crtc, true);

	intel_update_fbc(dev);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);
}
3749
 
2327 Serge 3750
/*
 * Bring up a gen2-4 pipe: pre-enable encoder hooks, PLL, pfit, LUT,
 * pipe/planes (with the G4X cursor workaround), cursor, overlay, FBC,
 * then encoder enable. No-op if the crtc is already active.
 */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;

	WARN_ON(!crtc->enabled);

    if (intel_crtc->active)
        return;

    intel_crtc->active = true;
    intel_update_watermarks(dev);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_enable_pll(intel_crtc);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

    intel_enable_pipe(dev_priv, pipe, false);
    intel_enable_plane(dev_priv, plane, pipe);
	intel_enable_planes(crtc);
	/* The fixup needs to happen before cursor is enabled */
	if (IS_G4X(dev))
		g4x_fixup_plane(dev_priv, pipe);
	intel_crtc_update_cursor(crtc, true);

    /* Give the overlay scaler a chance to enable if it's on this pipe */
    intel_crtc_dpms_overlay(intel_crtc, true);

	intel_update_fbc(dev);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);
}
3793
 
3746 Serge 3794
static void i9xx_pfit_disable(struct intel_crtc *crtc)
3795
{
3796
	struct drm_device *dev = crtc->base.dev;
3797
	struct drm_i915_private *dev_priv = dev->dev_private;
3798
 
4104 Serge 3799
	if (!crtc->config.gmch_pfit.control)
3800
		return;
3801
 
3746 Serge 3802
	assert_pipe_disabled(dev_priv, crtc->pipe);
3803
 
4104 Serge 3804
	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
3805
			 I915_READ(PFIT_CONTROL));
3746 Serge 3806
		I915_WRITE(PFIT_CONTROL, 0);
3807
}
3808
 
2327 Serge 3809
/*
 * Power down a gen2-4 / VLV pipe: encoders, FBC, overlay/cursor/planes,
 * pipe, pfit, post-disable hooks, then the DPLL. Reverse of
 * i9xx_crtc_enable(). No-op if the pipe is already inactive.
 */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;

    if (!intel_crtc->active)
        return;

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

    /* Give the overlay scaler a chance to disable if it's on this pipe */
    /* Pending-flip wait and vblank-off are stubbed out in this port. */
//    intel_crtc_wait_for_pending_flips(crtc);
//    drm_vblank_off(dev, pipe);

	if (dev_priv->fbc.plane == plane)
        intel_disable_fbc(dev);

	intel_crtc_dpms_overlay(intel_crtc, false);
	intel_crtc_update_cursor(crtc, false);
	intel_disable_planes(crtc);
    intel_disable_plane(dev_priv, plane, pipe);

    intel_disable_pipe(dev_priv, pipe);

	i9xx_pfit_disable(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (IS_VALLEYVIEW(dev))
		vlv_disable_pll(dev_priv, pipe);
	else
	i9xx_disable_pll(dev_priv, pipe);

    intel_crtc->active = false;
    intel_update_fbc(dev);
    intel_update_watermarks(dev);
}
3853
 
3031 serge 3854
/* ->off() hook for gen2-4: no PLL bookkeeping to release. */
static void i9xx_crtc_off(struct drm_crtc *crtc)
{
}
3857
 
3031 serge 3858
/*
 * Mirror the crtc's enabled mode dimensions into the legacy SAREA.
 * The whole body is compiled out (#if 0) in this port, so the locals are
 * only kept to preserve the upstream shape; the function is a no-op.
 */
static void intel_crtc_update_sarea(struct drm_crtc *crtc,
				    bool enabled)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_master_private *master_priv;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;


#if 0
	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (!master_priv->sarea_priv)
		return;

	switch (pipe) {
	case 0:
		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
		master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
		break;
	case 1:
		master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
		master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
		break;
	default:
		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
		break;
	}
#endif

}
2327 Serge 3891
 
3031 serge 3892
/**
3893
 * Sets the power management mode of the pipe and plane.
3894
 */
3895
void intel_crtc_update_dpms(struct drm_crtc *crtc)
3896
{
3897
	struct drm_device *dev = crtc->dev;
3898
	struct drm_i915_private *dev_priv = dev->dev_private;
3899
	struct intel_encoder *intel_encoder;
3900
	bool enable = false;
3901
 
3902
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
3903
		enable |= intel_encoder->connectors_active;
3904
 
3905
	if (enable)
3906
		dev_priv->display.crtc_enable(crtc);
3907
	else
3908
		dev_priv->display.crtc_disable(crtc);
3909
 
3910
	intel_crtc_update_sarea(crtc, enable);
3911
}
3912
 
2330 Serge 3913
/*
 * Fully disable a crtc and update all derived software state: run the
 * platform disable and ->off() hooks, verify the hardware is really off,
 * unpin the scanout buffer, and mark every connector routed through this
 * crtc as DPMS_OFF.
 */
static void intel_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_connector *connector;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* crtc should still be enabled when we disable it. */
	WARN_ON(!crtc->enabled);

	dev_priv->display.crtc_disable(crtc);
	intel_crtc->eld_vld = false;
	intel_crtc_update_sarea(crtc, false);
	dev_priv->display.off(crtc);

	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
	assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);

	/* Release the framebuffer pin now that nothing scans it out. */
	if (crtc->fb) {
		mutex_lock(&dev->struct_mutex);
		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
		mutex_unlock(&dev->struct_mutex);
		crtc->fb = NULL;
	}

	/* Update computed state. */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		if (!connector->encoder || !connector->encoder->crtc)
			continue;

		if (connector->encoder->crtc != crtc)
			continue;

		connector->dpms = DRM_MODE_DPMS_OFF;
		to_intel_encoder(connector->encoder)->connectors_active = false;
	}
}
2327 Serge 3950
 
3031 serge 3951
/* Default encoder destructor: release DRM core state, then the wrapper. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(to_intel_encoder(encoder));
}
2327 Serge 3958
 
4104 Serge 3959
/* Simple dpms helper for encoders with just one connector, no cloning and only
3031 serge 3960
 * one kind of off state. It clamps all !ON modes to fully OFF and changes the
3961
 * state of the entire output pipe. */
4104 Serge 3962
static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
2330 Serge 3963
{
3031 serge 3964
	if (mode == DRM_MODE_DPMS_ON) {
3965
		encoder->connectors_active = true;
3966
 
3967
		intel_crtc_update_dpms(encoder->base.crtc);
3968
	} else {
3969
		encoder->connectors_active = false;
3970
 
3971
		intel_crtc_update_dpms(encoder->base.crtc);
3972
	}
2330 Serge 3973
}
2327 Serge 3974
 
3031 serge 3975
/* Cross check the actual hw state with our own modeset state tracking (and it's
 * internal consistency). */
static void intel_connector_check_state(struct intel_connector *connector)
{
	/* Only active connectors are checked; everything below is a WARN,
	 * never a fixup — this is a pure consistency audit. */
	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;
		struct drm_crtc *crtc;
		bool encoder_enabled;
		enum pipe pipe;

		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
			      connector->base.base.id,
			      drm_get_connector_name(&connector->base));

		WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
		     "wrong connector dpms state\n");
		WARN(connector->base.encoder != &encoder->base,
		     "active connector not linked to encoder\n");
		WARN(!encoder->connectors_active,
		     "encoder->connectors_active not set\n");

		encoder_enabled = encoder->get_hw_state(encoder, &pipe);
		WARN(!encoder_enabled, "encoder not enabled\n");
		if (WARN_ON(!encoder->base.crtc))
			return;

		crtc = encoder->base.crtc;

		WARN(!crtc->enabled, "crtc not enabled\n");
		WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
		WARN(pipe != to_intel_crtc(crtc)->pipe,
		     "encoder active on the wrong pipe\n");
	}
}
2327 Serge 4009
 
3031 serge 4010
/* Even simpler default implementation, if there's really no special case to
4011
 * consider. */
4012
void intel_connector_dpms(struct drm_connector *connector, int mode)
2330 Serge 4013
{
3031 serge 4014
	/* All the simple cases only support two dpms states. */
4015
	if (mode != DRM_MODE_DPMS_ON)
4016
		mode = DRM_MODE_DPMS_OFF;
2342 Serge 4017
 
3031 serge 4018
	if (mode == connector->dpms)
4019
		return;
4020
 
4021
	connector->dpms = mode;
4022
 
4023
	/* Only need to change hw state when actually enabled */
4104 Serge 4024
	if (connector->encoder)
4025
		intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
3031 serge 4026
 
4027
	intel_modeset_check_state(connector->dev);
2330 Serge 4028
}
2327 Serge 4029
 
3031 serge 4030
/* Simple connector->get_hw_state implementation for encoders that support only
4031
 * one connector and no cloning and hence the encoder state determines the state
4032
 * of the connector. */
4033
bool intel_connector_get_hw_state(struct intel_connector *connector)
2330 Serge 4034
{
3031 serge 4035
	enum pipe pipe = 0;
4036
	struct intel_encoder *encoder = connector->encoder;
2330 Serge 4037
 
3031 serge 4038
	return encoder->get_hw_state(encoder, &pipe);
2330 Serge 4039
}
4040
 
4104 Serge 4041
/*
 * Validate the FDI lane count requested by @pipe_config against the
 * hardware limits: 4 lanes absolute max, 2 on Haswell, and the Ivybridge
 * 3-pipe lane-sharing rules between pipes B and C. Returns false when the
 * configuration cannot work.
 */
static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				     struct intel_crtc_config *pipe_config)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *pipe_B_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return false;
	}

	if (IS_HASWELL(dev)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return false;
		} else {
			return true;
		}
	}

	/* With only two pipes there is no lane sharing to worry about. */
	if (INTEL_INFO(dev)->num_pipes == 2)
		return true;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return true;
	case PIPE_B:
		/* Pipe B may only use >2 lanes while pipe C is idle. */
		if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
		    pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return false;
		}
		return true;
	case PIPE_C:
		/* Pipe C shares lanes with pipe B; B must leave some free. */
		if (!pipe_has_enabled_pch(pipe_B_crtc) ||
		    pipe_B_crtc->config.fdi_lanes <= 2) {
			if (pipe_config->fdi_lanes > 2) {
				DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
					      pipe_name(pipe), pipe_config->fdi_lanes);
				return false;
			}
		} else {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return false;
		}
		return true;
	default:
		BUG();
	}
}
4098
 
4099
#define RETRY 1
/*
 * Compute FDI lane count and link M/N values for @pipe_config. If the
 * requested bpp needs more lanes than the hardware allows, reduce pipe_bpp
 * by one step (2 bits per channel) and retry. Returns 0 on success, RETRY
 * when the caller must recompute with the reduced bpp, or -EINVAL.
 */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				      struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
	int lane, link_bw, fdi_dotclock;
	bool setup_ok, needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;

	fdi_dotclock = adjusted_mode->clock;
	fdi_dotclock /= pipe_config->pixel_multiplier;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n);

	setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
					    intel_crtc->pipe, pipe_config);
	/* Lane config rejected: step down 2 bits per channel (min 6bpc). */
	if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return setup_ok ? 0 : -EINVAL;
}
4146
 
4147
static void hsw_compute_ips_config(struct intel_crtc *crtc,
4148
				   struct intel_crtc_config *pipe_config)
4149
{
4150
	pipe_config->ips_enabled = i915_enable_ips &&
4151
				   hsw_crtc_supports_ips(crtc) &&
4152
				   pipe_config->pipe_bpp <= 24;
4153
}
4154
 
4155
/*
 * Platform-independent fixups of the requested pipe config: FDI bandwidth
 * ceiling, the Cantiga+ hsync-porch erratum, pipe bpp clamping per
 * generation, IPS, and DPLL carry-over. Returns 0, RETRY (via the FDI
 * helper) or a negative error.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;

	if (HAS_PCH_SPLIT(dev)) {
		/* FDI link clock is fixed at 2.7G */
		if (pipe_config->requested_mode.clock * 3
		    > IRONLAKE_FDI_FREQ * 4)
			return -EINVAL;
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
		adjusted_mode->hsync_start == adjusted_mode->hdisplay)
		return -EINVAL;

	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
		pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
	} else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
		/* only a 8bpc pipe, with 6bpc dither through the panel fitter
		 * for lvds. */
		pipe_config->pipe_bpp = 8*3;
	}

	if (HAS_IPS(dev))
		hsw_compute_ips_config(crtc, pipe_config);

	/* XXX: PCH clock sharing is done in ->mode_set, so make sure the old
	 * clock survives for now. */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		pipe_config->shared_dpll = crtc->config.shared_dpll;

	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
4196
 
3031 serge 4197
/* Display core clock in kHz for ValleyView. */
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
	return 400000; /* FIXME */
}
4201
 
2327 Serge 4202
/* Display core clock in kHz for i945-class hardware. */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}
4206
 
4207
/* Display core clock in kHz for i915-class hardware. */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333000;
}
4211
 
4212
/* Display core clock in kHz for the remaining i9xx variants. */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}
4216
 
4104 Serge 4217
/* Read the Pineview display core clock (kHz) from the GCFGC config word. */
static int pnv_get_display_clock_speed(struct drm_device *dev)
{
	u16 gcfgc = 0;

	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
		return 267000;
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
		return 333000;
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
		return 444000;
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
		return 200000;
	default:
		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
		/* fall through: report the slowest known speed */
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
		return 133000;
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
		return 167000;
	}
}
4240
 
2327 Serge 4241
static int i915gm_get_display_clock_speed(struct drm_device *dev)
4242
{
4243
	u16 gcfgc = 0;
4244
 
4245
	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
4246
 
4247
	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
4248
		return 133000;
4249
	else {
4250
		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
4251
		case GC_DISPLAY_CLOCK_333_MHZ:
4252
			return 333000;
4253
		default:
4254
		case GC_DISPLAY_CLOCK_190_200_MHZ:
4255
			return 190000;
4256
		}
4257
	}
4258
}
4259
 
4260
/* Display core clock in kHz for i865. */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266000;
}
4264
 
4265
/*
 * Display core clock in kHz for i855.
 * NOTE(review): hpllcc is hard-coded to 0 here (the upstream driver reads
 * it from the host bridge), so the switch always takes the case matching
 * 0 & GC_CLOCK_CONTROL_MASK — confirm this is intentional in the port.
 */
static int i855_get_display_clock_speed(struct drm_device *dev)
{
	u16 hpllcc = 0;
	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133000;
	}

	/* Shouldn't happen */
	return 0;
}
4284
 
4285
/* Display core clock in kHz for i830. */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133000;
}
4289
 
4290
static void
3746 Serge 4291
intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
2327 Serge 4292
{
3746 Serge 4293
	while (*num > DATA_LINK_M_N_MASK ||
4294
	       *den > DATA_LINK_M_N_MASK) {
2327 Serge 4295
		*num >>= 1;
4296
		*den >>= 1;
4297
	}
4298
}
4299
 
3746 Serge 4300
/*
 * Derive register-ready M/N values for the ratio m/n: pick N as the
 * next power of two (capped at DATA_LINK_N_MAX), scale M to preserve the
 * ratio, then shrink both into the register field width.
 */
static void compute_m_n(unsigned int m, unsigned int n,
			uint32_t *ret_m, uint32_t *ret_n)
{
	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
	*ret_m = div_u64((uint64_t) m * *ret_n, n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}
4307
 
3480 Serge 4308
/*
 * Fill @m_n with the data (gmch) and link M/N dividers for a display
 * link: data ratio = bpp * pixel_clock / (link_clock * lanes * 8),
 * link ratio = pixel_clock / link_clock. TU size is fixed at 64.
 */
void
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n)
{
	m_n->tu = 64;

	compute_m_n(bits_per_pixel * pixel_clock,
		    link_clock * nlanes * 8,
		    &m_n->gmch_m, &m_n->gmch_n);

	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n);
}
4322
 
4323
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4324
{
2342 Serge 4325
	if (i915_panel_use_ssc >= 0)
4326
		return i915_panel_use_ssc != 0;
4104 Serge 4327
	return dev_priv->vbt.lvds_use_ssc
2327 Serge 4328
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4329
}
4330
 
3031 serge 4331
/*
 * Reference clock (kHz) for ValleyView PLLs. Currently hard-wired to
 * 100 MHz by the early return ("only one validated so far"); the
 * per-output selection below is intentionally dead code kept for when
 * more configurations are validated.
 */
static int vlv_get_refclk(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int refclk = 27000; /* for DP & HDMI */

	return 100000; /* only one validated so far */

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
		refclk = 96000;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv))
			refclk = 100000;
		else
			refclk = 96000;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
		refclk = 100000;
	}

	return refclk;
}
2327 Serge 4352
 
3031 serge 4353
/*
 * Reference clock (kHz) for gen2-4/VLV PLL computation: VLV has its own
 * helper, single-connector LVDS panels may use the VBT SSC clock,
 * otherwise 96 MHz (gen3+) or 48 MHz (gen2).
 */
static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int refclk;

	if (IS_VALLEYVIEW(dev)) {
		refclk = vlv_get_refclk(crtc);
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		refclk = dev_priv->vbt.lvds_ssc_freq * 1000;
		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
			      refclk / 1000);
	} else if (!IS_GEN2(dev)) {
		refclk = 96000;
	} else {
		refclk = 48000;
	}

	return refclk;
}
2327 Serge 4374
 
4104 Serge 4375
/* Pack the Pineview FP register value: N is encoded as (1 << n), M1 unused. */
static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
{
	return (1 << dpll->n) << 16 | dpll->m2;
}
3746 Serge 4379
 
4104 Serge 4380
/* Pack the i9xx FP register value from the N/M1/M2 divisors. */
static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
{
	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
}
2327 Serge 4384
 
3746 Serge 4385
/*
 * Write the FP0/FP1 divider registers for the crtc's DPLL and mirror the
 * values into dpll_hw_state. FP1 gets the reduced (downclocked) dividers
 * only for LVDS with powersave enabled; lowfreq_avail records whether
 * that downclock path is usable.
 */
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
				     intel_clock_t *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	u32 fp, fp2 = 0;

	/* Pineview encodes the FP register differently. */
	if (IS_PINEVIEW(dev)) {
		fp = pnv_dpll_compute_fp(&crtc->config.dpll);
		if (reduced_clock)
			fp2 = pnv_dpll_compute_fp(reduced_clock);
	} else {
		fp = i9xx_dpll_compute_fp(&crtc->config.dpll);
		if (reduced_clock)
			fp2 = i9xx_dpll_compute_fp(reduced_clock);
	}

	I915_WRITE(FP0(pipe), fp);
	crtc->config.dpll_hw_state.fp0 = fp;

	crtc->lowfreq_avail = false;
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
	    reduced_clock && i915_powersave) {
		I915_WRITE(FP1(pipe), fp2);
		crtc->config.dpll_hw_state.fp1 = fp2;
		crtc->lowfreq_avail = true;
	} else {
		I915_WRITE(FP1(pipe), fp);
		crtc->config.dpll_hw_state.fp1 = fp;
	}
}
2327 Serge 4417
 
4104 Serge 4418
/*
 * Recalibrate the PLL B opamp via DPIO. The register values are opaque
 * hardware magic taken from the reference sequence.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION);
	reg_val &= 0x8cffffff;
	/* NOTE(review): the masked read above is discarded by this plain
	 * assignment — this matches the reference sequence, but confirm it
	 * is intentional and not a missing "|=". */
	reg_val = 0x8c000000;
	vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val);

	reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val);
}
4445
 
4446
/* Program the PCH transcoder data/link M1/N1 registers for @crtc's pipe. */
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
4458
 
4459
/*
 * Program the CPU-side data/link M/N registers: transcoder-indexed
 * registers on gen5+, pipe-indexed G4X registers on older hardware.
 */
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	enum transcoder transcoder = crtc->config.cpu_transcoder;

	if (INTEL_INFO(dev)->gen >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
	} else {
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
4479
 
3746 Serge 4480
static void intel_dp_set_m_n(struct intel_crtc *crtc)
3031 serge 4481
{
3746 Serge 4482
	if (crtc->config.has_pch_encoder)
4483
		intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
4484
	else
4485
		intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
4486
}
4487
 
4488
/*
 * Program the ValleyView display PHY (DPIO) for this CRTC and compute the
 * DPLL/DPLL_MD control values, which are stashed in
 * crtc->config.dpll_hw_state for the enable path to write later.
 *
 * Divider values come from crtc->config.dpll (filled in by the mode-set
 * path).  The magic constants and the ordering of the DPIO writes follow
 * the "eDP HDMI DPIO driver vbios notes" sequence referenced below — do
 * not reorder.  Serializes all DPIO access via dev_priv->dpio_lock.
 */
static void vlv_update_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	u32 dpll, mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val, dpll_md;

	mutex_lock(&dev_priv->dpio_lock);

	/* PLL dividers were selected earlier by the clock search. */
	bestn = crtc->config.dpll.n;
	bestm1 = crtc->config.dpll.m1;
	bestm2 = crtc->config.dpll.m2;
	bestp1 = crtc->config.dpll.p1;
	bestp2 = crtc->config.dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe)
		vlv_pllb_recal_opamp(dev_priv);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, DPIO_IREF_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, DPIO_IREF_CTL(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, DPIO_IREF_CTL(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);

	/* Dividers are written once without, then once with, the
	 * calibration-enable bit set. */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (crtc->config.port_clock == 162000 ||
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
				 0x00d0000f);

	/* Reference source selection: SSC for DP/eDP, bend source otherwise.
	 * Note the per-pipe constants are swapped between the two cases. */
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
		/* Use SSC source */
		if (!pipe)
			vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (!pipe)
			vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
					 0x0df40000);
	}

	/* Core clock control; extra bit set for DP/eDP outputs. */
	coreclk = vlv_dpio_read(dev_priv, DPIO_CORE_CLK(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), coreclk);

	vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000);

	/* Enable DPIO clock input */
	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
	/* We should never disable this, set it here for state tracking */
	if (pipe == PIPE_B)
		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
	dpll |= DPLL_VCO_ENABLE;
	crtc->config.dpll_hw_state.dpll = dpll;

	/* UDI pixel multiplier is stored as (multiplier - 1). */
	dpll_md = (crtc->config.pixel_multiplier - 1)
		<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
	crtc->config.dpll_hw_state.dpll_md = dpll_md;

	if (crtc->config.has_dp_encoder)
		intel_dp_set_m_n(crtc);

	mutex_unlock(&dev_priv->dpio_lock);
}
4595
 
3746 Serge 4596
/*
 * Build the DPLL (and, on gen4+, DPLL_MD) control values for i9xx-class
 * hardware (non-GEN2, non-ValleyView — see the dispatch in
 * i9xx_crtc_mode_set) and store them in crtc->config.dpll_hw_state.
 *
 * @reduced_clock: optional LVDS downclock dividers; only its p1 is used,
 *                 and only on G4X.
 * @num_connectors: used with panel SSC to decide whether the
 *                  spread-spectrum reference input may be selected.
 */
static void i9xx_update_pll(struct intel_crtc *crtc,
			    intel_clock_t *reduced_clock,
			    int num_connectors)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll;
	bool is_sdvo;
	struct dpll *clock = &crtc->config.dpll;

	/* Program the FP0/FP1 divider registers first. */
	i9xx_update_pll_dividers(crtc, reduced_clock);

	is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
		intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* On 945/G33 the pixel multiplier lives in the DPLL register itself
	 * (stored as multiplier - 1); gen4+ uses DPLL_MD below instead. */
	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
			dpll |= (crtc->config.pixel_multiplier - 1)
				<< SDVO_MULTIPLIER_SHIFT_HIRES;
		}

	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* NOTE(review): DisplayPort sets the same high-speed bit as the
	 * SDVO/HDMI case above; setting it twice is harmless. */
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		/* G4X also encodes the downclocked P1 for LVDS downclocking. */
		if (IS_G4X(dev) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_INFO(dev)->gen >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Reference clock selection: SDVO TV clock, panel SSC (only when the
	 * panel is the sole connector), or the default reference. */
	if (crtc->config.sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc->config.dpll_hw_state.dpll = dpll;

	if (INTEL_INFO(dev)->gen >= 4) {
		u32 dpll_md = (crtc->config.pixel_multiplier - 1)
					<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc->config.dpll_hw_state.dpll_md = dpll_md;
	}

	if (crtc->config.has_dp_encoder)
		intel_dp_set_m_n(crtc);
}
2327 Serge 4674
 
3746 Serge 4675
/*
 * Build the DPLL control value for GEN2 hardware and store it in
 * crtc->config.dpll_hw_state.dpll.  Divider values come from
 * crtc->config.dpll; @reduced_clock is forwarded to the FP0/FP1
 * programming helper.  @num_connectors gates panel SSC selection as in
 * i9xx_update_pll().
 */
static void i8xx_update_pll(struct intel_crtc *crtc,
			    intel_clock_t *reduced_clock,
			    int num_connectors)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll;
	struct dpll *clock = &crtc->config.dpll;

	/* Program the FP0/FP1 divider registers first. */
	i9xx_update_pll_dividers(crtc, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	/* P1/P2 are encoded differently for LVDS vs. everything else. */
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	/* Use the spread-spectrum reference only when the LVDS panel is the
	 * sole connector on this pipe. */
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc->config.dpll_hw_state.dpll = dpll;
}
4711
 
4104 Serge 4712
/*
 * Program the pipe/transcoder timing registers (H/V total, blank, sync,
 * vsyncshift and PIPESRC) from the CRTC's adjusted mode.  All register
 * fields are stored as (value - 1) per hardware convention.
 */
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
	struct drm_display_mode *adjusted_mode =
		&intel_crtc->config.adjusted_mode;
	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
	uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;
		vsyncshift = adjusted_mode->crtc_hsync_start
			     - adjusted_mode->crtc_htotal / 2;
	} else {
		vsyncshift = 0;
	}

	if (INTEL_INFO(dev)->gen > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	/* Horizontal timings: low 16 bits = active/start, high 16 = total/end. */
	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	/* Vertical timings use the locally-adjusted interlace values. */
	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
}
4775
 
4104 Serge 4776
/*
 * Read back the pipe/transcoder timing registers into @pipe_config — the
 * inverse of intel_set_pipe_timings().  Hardware stores (value - 1), so
 * each field is incremented on readout; the interlace adjustment made on
 * the write side is undone here as well.
 */
static void intel_get_pipe_timings(struct intel_crtc *crtc,
				   struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	uint32_t tmp;

	tmp = I915_READ(HTOTAL(cpu_transcoder));
	pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HBLANK(cpu_transcoder));
	pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
	pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HSYNC(cpu_transcoder));
	pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = I915_READ(VTOTAL(cpu_transcoder));
	pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VBLANK(cpu_transcoder));
	pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
	pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VSYNC(cpu_transcoder));
	pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	/* Undo the halfline subtraction applied for interlaced modes on the
	 * write side (see intel_set_pipe_timings). */
	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		pipe_config->adjusted_mode.crtc_vtotal += 1;
		pipe_config->adjusted_mode.crtc_vblank_end += 1;
	}

	/* PIPESRC packs hdisplay in the high 16 bits, vdisplay in the low. */
	tmp = I915_READ(PIPESRC(crtc->pipe));
	pipe_config->requested_mode.vdisplay = (tmp & 0xffff) + 1;
	pipe_config->requested_mode.hdisplay = ((tmp >> 16) & 0xffff) + 1;
}
4814
 
4815
static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
4816
					     struct intel_crtc_config *pipe_config)
4817
{
4818
	struct drm_crtc *crtc = &intel_crtc->base;
4819
 
4820
	crtc->mode.hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
4821
	crtc->mode.htotal = pipe_config->adjusted_mode.crtc_htotal;
4822
	crtc->mode.hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
4823
	crtc->mode.hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
4824
 
4825
	crtc->mode.vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
4826
	crtc->mode.vtotal = pipe_config->adjusted_mode.crtc_vtotal;
4827
	crtc->mode.vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
4828
	crtc->mode.vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
4829
 
4830
	crtc->mode.flags = pipe_config->adjusted_mode.flags;
4831
 
4832
	crtc->mode.clock = pipe_config->adjusted_mode.clock;
4833
	crtc->mode.flags |= pipe_config->adjusted_mode.flags;
4834
}
4835
 
3746 Serge 4836
/*
 * Compute and write the PIPECONF register for a gen2–gen4/VLV pipe:
 * double-wide mode, bpc/dither (G4X+), CxSR downclocking, interlace mode
 * and (VLV) limited color range.  Flushes the write with a posting read.
 */
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t pipeconf;

	pipeconf = 0;

	/* Preserve the enable bit if the PIPEA_FORCE quirk keeps the pipe on. */
	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
	    I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
		pipeconf |= PIPECONF_ENABLE;

	if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) {
		/* Enable pixel doubling when the dot clock is > 90% of the (display)
		 * core speed.
		 *
		 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
		 * pipe == 0 check?
		 */
		if (intel_crtc->config.requested_mode.clock >
		    dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
			pipeconf |= PIPECONF_DOUBLE_WIDE;
	}

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (intel_crtc->config.pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (HAS_PIPE_CXSR(dev)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
		}
	}

	/* GEN2 has no interlace support in PIPECONF. */
	if (!IS_GEN2(dev) &&
	    intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
	else
		pipeconf |= PIPECONF_PROGRESSIVE;

	if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range)
			pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(intel_crtc->pipe));
}
4904
 
3031 serge 4905
/*
 * Full mode-set for gen2–gen4/VLV pipes: find PLL dividers for the target
 * port clock, program the PLL (dispatching to the gen-specific helper),
 * set pipe timings, plane size/position and PIPECONF, then set the
 * framebuffer base and update watermarks.
 *
 * Returns 0 on success or a negative errno (-EINVAL when no PLL settings
 * can be found and none were pre-set; otherwise whatever
 * intel_pipe_set_base() returns).
 */
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
			      int x, int y,
			      struct drm_framebuffer *fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	int refclk, num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	u32 dspcntr;
	bool ok, has_reduced_clock = false;
	bool is_lvds = false;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;
	int ret;

	/* Count connectors and detect LVDS (affects refclk/SSC choices). */
	for_each_encoder_on_crtc(dev, crtc, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		}

		num_connectors++;
	}

	refclk = i9xx_get_refclk(crtc, num_connectors);

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or FALSE.  The returned values represent the clock equation:
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc, refclk);
	ok = dev_priv->display.find_dpll(limit, crtc,
					 intel_crtc->config.port_clock,
					 refclk, NULL, &clock);
	/* A failed search is only fatal if no dividers were pre-set. */
	if (!ok && !intel_crtc->config.clock_set) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	if (is_lvds && dev_priv->lvds_downclock_avail) {
		/*
		 * Ensure we match the reduced clock's P to the target clock.
		 * If the clocks don't match, we can't switch the display clock
		 * by using the FP0/FP1. In such case we will disable the LVDS
		 * downclock feature.
		*/
		has_reduced_clock =
			dev_priv->display.find_dpll(limit, crtc,
						    dev_priv->lvds_downclock,
						    refclk, &clock,
						    &reduced_clock);
	}
	/* Compat-code for transition, will disappear. */
	if (!intel_crtc->config.clock_set) {
		intel_crtc->config.dpll.n = clock.n;
		intel_crtc->config.dpll.m1 = clock.m1;
		intel_crtc->config.dpll.m2 = clock.m2;
		intel_crtc->config.dpll.p1 = clock.p1;
		intel_crtc->config.dpll.p2 = clock.p2;
	}

	/* Dispatch to the generation-specific PLL programming helper. */
	if (IS_GEN2(dev))
		i8xx_update_pll(intel_crtc,
				has_reduced_clock ? &reduced_clock : NULL,
				num_connectors);
	else if (IS_VALLEYVIEW(dev))
		vlv_update_pll(intel_crtc);
	else
		i9xx_update_pll(intel_crtc,
				has_reduced_clock ? &reduced_clock : NULL,
				num_connectors);

	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;

	/* Pipe selection bits don't exist on VLV planes. */
	if (!IS_VALLEYVIEW(dev)) {
	if (pipe == 0)
		dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
	else
		dspcntr |= DISPPLANE_SEL_PIPE_B;
	}

	intel_set_pipe_timings(intel_crtc);

	/* pipesrc and dspsize control the size that is scaled from,
	 * which should always be the user's requested size.
	 */
	I915_WRITE(DSPSIZE(plane),
		   ((mode->vdisplay - 1) << 16) |
		   (mode->hdisplay - 1));
	I915_WRITE(DSPPOS(plane), 0);

	i9xx_set_pipeconf(intel_crtc);

	I915_WRITE(DSPCNTR(plane), dspcntr);
	POSTING_READ(DSPCNTR(plane));

	ret = intel_pipe_set_base(crtc, x, y, fb);

	intel_update_watermarks(dev);

    return ret;
}
5014
 
4104 Serge 5015
/*
 * Read back the panel fitter state into @pipe_config->gmch_pfit, but only
 * when the fitter is enabled and attached to this CRTC's pipe.  On pre-gen5
 * hardware the LVDS border bits are captured as well.
 */
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
				 struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	tmp = I915_READ(PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (INTEL_INFO(dev)->gen < 4) {
		/* Pre-gen4: the fitter is hard-wired to pipe B. */
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	pipe_config->gmch_pfit.control = tmp;
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
	if (INTEL_INFO(dev)->gen < 5)
		pipe_config->gmch_pfit.lvds_border_bits =
			I915_READ(LVDS) & LVDS_BORDER_ENABLE;
}
5041
 
4398 Serge 5042
/*
 * Derive the pipe's pixel clock on ValleyView by reading the DPIO divider
 * register and re-applying the PLL clock equation, storing the result in
 * pipe_config->adjusted_mode.clock.  VLV uses a fixed 100 MHz reference.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	intel_clock_t clock;
	u32 mdiv;
	int refclk = 100000;	/* in kHz */

	/* DPIO access must be serialized against other users. */
	mutex_lock(&dev_priv->dpio_lock);
	mdiv = vlv_dpio_read(dev_priv, DPIO_DIV(pipe));
	mutex_unlock(&dev_priv->dpio_lock);

	/* Unpack the divider fields written by vlv_update_pll(). */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	clock.vco = refclk * clock.m1 * clock.m2 / clock.n;
	clock.dot = 2 * clock.vco / (clock.p1 * clock.p2);

	pipe_config->adjusted_mode.clock = clock.dot / 10;
}
5067
 
3746 Serge 5068
/*
 * Read back the full hardware pipe state for a gen2–gen4/VLV pipe into
 * @pipe_config: transcoder, bpc (G4X/VLV), timings, panel fitter, pixel
 * multiplier and DPLL register state.
 *
 * Returns false (leaving @pipe_config partially filled) when the pipe is
 * disabled, true otherwise.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	/* On these platforms the CPU transcoder is always the pipe itself,
	 * and there are no shared DPLLs. */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	/* Only G4X and VLV encode bpc in PIPECONF (see i9xx_set_pipeconf). */
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	intel_get_pipe_timings(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	/* Pixel multiplier location varies by generation: DPLL_MD on gen4+,
	 * the DPLL register on 945/G33, and the SDVO port otherwise. */
	if (INTEL_INFO(dev)->gen >= 4) {
		tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev)) {
		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	return true;
}
5132
 
3243 Serge 5133
/*
 * Configure the PCH display reference clock (PCH_DREF_CONTROL) on
 * Ironlake-class hardware: select the nonspread source (CK505 on IBX when
 * the VBT says so), and enable/disable the SSC source and CPU eDP output
 * based on which panel outputs exist.
 *
 * The desired final value is computed first; if the register already
 * matches, nothing is written.  Otherwise each source is switched one step
 * at a time with posting reads and 200us delays between steps — the
 * ordering and delays are required by the hardware, do not reorder.
 */
static void ironlake_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;

	/* We need to take the global config into account */
		list_for_each_entry(encoder, &mode_config->encoder_list,
				    base.head) {
			switch (encoder->type) {
			case INTEL_OUTPUT_LVDS:
			has_panel = true;
				has_lvds = true;
			break;
			case INTEL_OUTPUT_EDP:
			has_panel = true;
			/* Only port A eDP is CPU-attached. */
			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
				has_cpu_edp = true;
				break;
			}
		}

	if (HAS_PCH_IBX(dev)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
		      has_panel, has_lvds, has_ck505);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else {
		final |= DREF_SSC_SOURCE_DISABLE;
		final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	}

	/* Already in the desired state — avoid the slow switching sequence. */
	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output  */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
			POSTING_READ(PCH_DREF_CONTROL);
			udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			}
			else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
		} else {
		DRM_DEBUG_KMS("Disabling SSC entirely\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		/* Turn off the SSC source */
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_DISABLE;

		/* Turn off SSC1 */
		val &= ~DREF_SSC1_ENABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	}

	/* Sanity check: the stepwise sequence must land on the precomputed
	 * final value. */
	BUG_ON(val != final);
}
5281
 
4104 Serge 5282
/*
 * Pulse the FDI mPHY reset via SOUTH_CHICKEN2: assert the reset control
 * bit, wait (up to 100us) for the status bit to latch, then de-assert and
 * wait for the status bit to clear.  Timeouts are logged but not fatal.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

		tmp = I915_READ(SOUTH_CHICKEN2);
		tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
		I915_WRITE(SOUTH_CHICKEN2, tmp);

		if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
				       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
			DRM_ERROR("FDI mPHY reset assert timeout\n");

		tmp = I915_READ(SOUTH_CHICKEN2);
		tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
		I915_WRITE(SOUTH_CHICKEN2, tmp);

		if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
				FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
			DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
3243 Serge 5302
 
4104 Serge 5303
/* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY tuning registers over the SBI_MPHY sideband.
 * Each register is configured as a pair (0x2xxx for one lane group,
 * 0x21xx for the mirrored one). The magic offsets/values come from the
 * workaround above; do not reorder or "simplify" them.
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
3243 Serge 5377
 
4104 Serge 5378
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
				 bool with_fdi)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg, tmp;

	/* FDI needs spread spectrum; force it on and warn if caller got
	 * the combination wrong. */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	/* The LP (low power) PCH variant has no FDI at all. */
	if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
		 with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->dpio_lock);

	/* Enable the SSC block but keep the path in bend/alt mode. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		/* Switch from the alternate path to the spread clock. */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* Buffer-enable lives in a different SBI register on LP PCH. */
	reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
	       SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->dpio_lock);
}
5424
 
4104 Serge 5425
/* Sequence to disable CLKOUT_DP */
/*
 * Reverse of lpt_enable_clkout_dp(): drop the output buffer enable,
 * then (if the SSC block is still running) park the path in alt mode
 * before disabling the SSC block itself.
 */
static void lpt_disable_clkout_dp(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg, tmp;

	mutex_lock(&dev_priv->dpio_lock);

	/* Buffer-enable lives in a different SBI register on LP PCH. */
	reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
	       SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			/* Switch to the alternate path first, give it
			 * time to settle before turning SSC off. */
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->dpio_lock);
}
5452
 
5453
static void lpt_init_pch_refclk(struct drm_device *dev)
5454
{
5455
	struct drm_mode_config *mode_config = &dev->mode_config;
5456
	struct intel_encoder *encoder;
5457
	bool has_vga = false;
5458
 
5459
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5460
		switch (encoder->type) {
5461
		case INTEL_OUTPUT_ANALOG:
5462
			has_vga = true;
5463
			break;
5464
		}
5465
	}
5466
 
5467
	if (has_vga)
5468
		lpt_enable_clkout_dp(dev, true, true);
5469
	else
5470
		lpt_disable_clkout_dp(dev);
5471
}
5472
 
3243 Serge 5473
/*
 * Initialize reference clocks when the driver loads
 */
/* Dispatch to the PCH-generation-specific refclk setup routine. */
void intel_init_pch_refclk(struct drm_device *dev)
{
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		ironlake_init_pch_refclk(dev);
	else if (HAS_PCH_LPT(dev))
		lpt_init_pch_refclk(dev);
}
5483
 
2342 Serge 5484
static int ironlake_get_refclk(struct drm_crtc *crtc)
5485
{
5486
	struct drm_device *dev = crtc->dev;
5487
	struct drm_i915_private *dev_priv = dev->dev_private;
5488
	struct intel_encoder *encoder;
5489
	int num_connectors = 0;
5490
	bool is_lvds = false;
5491
 
3031 serge 5492
	for_each_encoder_on_crtc(dev, crtc, encoder) {
2342 Serge 5493
		switch (encoder->type) {
5494
		case INTEL_OUTPUT_LVDS:
5495
			is_lvds = true;
5496
			break;
5497
		}
5498
		num_connectors++;
5499
	}
5500
 
5501
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5502
		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
4104 Serge 5503
			      dev_priv->vbt.lvds_ssc_freq);
5504
		return dev_priv->vbt.lvds_ssc_freq * 1000;
2342 Serge 5505
	}
5506
 
5507
	return 120000;
5508
}
5509
 
4104 Serge 5510
/*
 * Program PIPECONF for an ILK/SNB/IVB pipe from the staged CRTC config:
 * bits-per-color, dithering, interlace mode and color range.
 */
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t val;

	val = 0;

	/* Translate pipe_bpp (total bits across 3 channels) into the
	 * PIPECONF BPC field. */
	switch (intel_crtc->config.pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (intel_crtc->config.dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	/* Limited (16-235) RGB range for e.g. TV-style sinks. */
	if (intel_crtc->config.limited_color_range)
		val |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(pipe), val);
	POSTING_READ(PIPECONF(pipe));
}
5551
 
3480 Serge 5552
/*
 * Set up the pipe CSC unit.
 *
 * Currently only full range RGB to limited range RGB conversion
 * is supported, but eventually this should handle various
 * RGB<->YCbCr scenarios as well.
 */
static void intel_set_pipe_csc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint16_t coeff = 0x7800; /* 1.0 */

	/*
	 * TODO: Check what kind of values actually come out of the pipe
	 * with these coeff/postoff values and adjust to get the best
	 * accuracy. Perhaps we even need to take the bpc value into
	 * consideration.
	 */

	/* Scale full-range [0,255] into limited-range [16,235]:
	 * (235-16)/255 in the CSC's fixed-point coefficient format. */
	if (intel_crtc->config.limited_color_range)
		coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */

	/*
	 * GY/GU and RY/RU should be the other way around according
	 * to BSpec, but reality doesn't agree. Just set them up in
	 * a way that results in the correct picture.
	 */
	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);

	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);

	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);

	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);

	if (INTEL_INFO(dev)->gen > 6) {
		/* IVB+: offsets are separate registers. */
		uint16_t postoff = 0;

		/* Shift output up by 16/255 for limited range. */
		if (intel_crtc->config.limited_color_range)
			postoff = (16 * (1 << 12) / 255) & 0x1fff;

		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);

		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
	} else {
		/* SNB: the black-screen-offset mode bit handles the
		 * post offset instead. */
		uint32_t mode = CSC_MODE_YUV_TO_RGB;

		if (intel_crtc->config.limited_color_range)
			mode |= CSC_BLACK_SCREEN_OFFSET;

		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
	}
}
5615
 
4104 Serge 5616
static void haswell_set_pipeconf(struct drm_crtc *crtc)
3243 Serge 5617
{
5618
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
5619
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3746 Serge 5620
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
3243 Serge 5621
	uint32_t val;
5622
 
4104 Serge 5623
	val = 0;
3243 Serge 5624
 
4104 Serge 5625
	if (intel_crtc->config.dither)
3243 Serge 5626
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5627
 
4104 Serge 5628
	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3243 Serge 5629
		val |= PIPECONF_INTERLACED_ILK;
5630
	else
5631
		val |= PIPECONF_PROGRESSIVE;
5632
 
5633
	I915_WRITE(PIPECONF(cpu_transcoder), val);
5634
	POSTING_READ(PIPECONF(cpu_transcoder));
4104 Serge 5635
 
5636
	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
5637
	POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
3243 Serge 5638
}
5639
 
3031 serge 5640
/*
 * Compute PLL divisors for the CRTC's target port clock.
 *
 * On success fills *clock; additionally, for an LVDS output with
 * downclocking available, tries to find a reduced clock with a matching
 * P divisor and reports the outcome in *has_reduced_clock /
 * *reduced_clock. Returns false if no divisors exist for the target
 * clock.
 */
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
				    intel_clock_t *clock,
				    bool *has_reduced_clock,
				    intel_clock_t *reduced_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	int refclk;
	const intel_limit_t *limit;
	bool ret, is_lvds = false;

	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		switch (intel_encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		}
	}

	refclk = ironlake_get_refclk(crtc);

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or FALSE.  The returned values represent the clock equation:
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc, refclk);
	ret = dev_priv->display.find_dpll(limit, crtc,
					  to_intel_crtc(crtc)->config.port_clock,
					  refclk, NULL, clock);
	if (!ret)
		return false;

	if (is_lvds && dev_priv->lvds_downclock_avail) {
		/*
		 * Ensure we match the reduced clock's P to the target clock.
		 * If the clocks don't match, we can't switch the display clock
		 * by using the FP0/FP1. In such case we will disable the LVDS
		 * downclock feature.
		*/
		*has_reduced_clock =
			dev_priv->display.find_dpll(limit, crtc,
						    dev_priv->lvds_downclock,
						    refclk, clock,
						    reduced_clock);
	}

	return true;
}
5690
 
3243 Serge 5691
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
5692
{
5693
	/*
5694
	 * Account for spread spectrum to avoid
5695
	 * oversubscribing the link. Max center spread
5696
	 * is 2.5%; use 5% for safety's sake.
5697
	 */
5698
	u32 bps = target_clock * bpp * 21 / 20;
5699
	return bps / (link_bw * 8) + 1;
5700
}
5701
 
4104 Serge 5702
/* True when the effective M divisor is small relative to N, i.e. the
 * PLL feedback loop needs the FP_CB_TUNE coarse tuning bits set. */
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}
5706
 
3243 Serge 5707
/*
 * Assemble the DPLL control register value for an Ironlake-family PCH
 * PLL from the staged CRTC configuration. May also OR FP_CB_TUNE into
 * *fp (and *fp2, when a reduced clock is supplied) based on the M/N
 * ratio. Returns the DPLL value with DPLL_VCO_ENABLE already set.
 */
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				      u32 *fp,
				      intel_clock_t *reduced_clock, u32 *fp2)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	uint32_t dpll;
	int factor, num_connectors = 0;
	bool is_lvds = false, is_sdvo = false;

	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		switch (intel_encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			break;
		}

		num_connectors++;
	}

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (is_lvds) {
		/* 100 MHz SSC refclk or dual-link LVDS on IBX needs the
		 * larger tuning factor. */
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100) ||
		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (intel_crtc->config.sdvo_tv_clock)
		factor = 20;

	if (ironlake_needs_fb_cb_tune(&intel_crtc->config.dpll, factor))
		*fp |= FP_CB_TUNE;

	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
		*fp2 |= FP_CB_TUNE;

	dpll = 0;

	if (is_lvds)
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* Pixel multiplier field (applied unconditionally; the config
	 * value is 1 for outputs that don't multiply). */
	dpll |= (intel_crtc->config.pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;
	if (intel_crtc->config.has_dp_encoder)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (intel_crtc->config.dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* Single LVDS panel with SSC enabled uses the spread-spectrum
	 * reference input. */
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	return dpll | DPLL_VCO_ENABLE;
}
5791
 
5792
/*
 * Modeset entry point for Ironlake-family (IBX/CPT PCH) CRTCs:
 * computes PLL divisors, reserves a shared DPLL when a PCH encoder is
 * present, programs pipe timings/M-N/PIPECONF, and sets the new
 * framebuffer base. Returns 0 on success or a negative errno.
 */
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
				  int x, int y,
				  struct drm_framebuffer *fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	int num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	u32 dpll = 0, fp = 0, fp2 = 0;
	bool ok, has_reduced_clock = false;
	bool is_lvds = false;
	struct intel_encoder *encoder;
	struct intel_shared_dpll *pll;
	int ret;

	/* NOTE(review): num_connectors is counted here but never used
	 * below — looks like leftover from an older version; confirm
	 * before removing. */
	for_each_encoder_on_crtc(dev, crtc, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		}

		num_connectors++;
	}

	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));

	ok = ironlake_compute_clocks(crtc, &clock,
				     &has_reduced_clock, &reduced_clock);
	if (!ok && !intel_crtc->config.clock_set) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}
	/* Compat-code for transition, will disappear. */
	if (!intel_crtc->config.clock_set) {
		intel_crtc->config.dpll.n = clock.n;
		intel_crtc->config.dpll.m1 = clock.m1;
		intel_crtc->config.dpll.m2 = clock.m2;
		intel_crtc->config.dpll.p1 = clock.p1;
		intel_crtc->config.dpll.p2 = clock.p2;
	}

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (intel_crtc->config.has_pch_encoder) {
		fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
		if (has_reduced_clock)
			fp2 = i9xx_dpll_compute_fp(&reduced_clock);

		dpll = ironlake_compute_dpll(intel_crtc,
					     &fp, &reduced_clock,
					     has_reduced_clock ? &fp2 : NULL);

		/* Stage the register values; the shared DPLL code
		 * writes them to hardware. */
		intel_crtc->config.dpll_hw_state.dpll = dpll;
		intel_crtc->config.dpll_hw_state.fp0 = fp;
		if (has_reduced_clock)
			intel_crtc->config.dpll_hw_state.fp1 = fp2;
		else
			intel_crtc->config.dpll_hw_state.fp1 = fp;

		pll = intel_get_shared_dpll(intel_crtc);
		if (pll == NULL) {
			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
					 pipe_name(pipe));
			return -EINVAL;
		}
	} else
		intel_put_shared_dpll(intel_crtc);

	if (intel_crtc->config.has_dp_encoder)
		intel_dp_set_m_n(intel_crtc);

	/* LVDS downclocking for power saving when idle. */
	if (is_lvds && has_reduced_clock && i915_powersave)
		intel_crtc->lowfreq_avail = true;
	else
		intel_crtc->lowfreq_avail = false;

	if (intel_crtc->config.has_pch_encoder) {
		/* NOTE(review): this assignment of pll has no visible
		 * effect here — presumably a remnant of removed code
		 * (upstream used it for register writes); confirm
		 * before cleaning up. */
		pll = intel_crtc_to_shared_dpll(intel_crtc);

	}

	intel_set_pipe_timings(intel_crtc);

	if (intel_crtc->config.has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
					     &intel_crtc->config.fdi_m_n);
	}

	ironlake_set_pipeconf(crtc);

	/* Set up the display plane register */
	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
	POSTING_READ(DSPCNTR(plane));

	ret = intel_pipe_set_base(crtc, x, y, fb);

	intel_update_watermarks(dev);

	return ret;
}
3243 Serge 5896
 
4104 Serge 5897
/*
 * Read back the FDI link M/N values for the pipe's transcoder into
 * pipe_config->fdi_m_n (used for hardware state cross-checking). The
 * TU size is packed into the high bits of the DATA_M register.
 */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder transcoder = pipe_config->cpu_transcoder;

	pipe_config->fdi_m_n.link_m = I915_READ(PIPE_LINK_M1(transcoder));
	pipe_config->fdi_m_n.link_n = I915_READ(PIPE_LINK_N1(transcoder));
	pipe_config->fdi_m_n.gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
					& ~TU_SIZE_MASK;
	pipe_config->fdi_m_n.gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
	pipe_config->fdi_m_n.tu = ((I915_READ(PIPE_DATA_M1(transcoder))
				   & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
5912
 
4104 Serge 5913
/*
 * Read back the PCH panel fitter state for the pipe into
 * pipe_config->pch_pfit (enabled flag, window position and size).
 */
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
				     struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	tmp = I915_READ(PF_CTL(crtc->pipe));

	if (tmp & PF_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));

		/* We currently do not free assignements of panel fitters on
		 * ivb/hsw (since we don't use the higher upscaling modes which
		 * differentiates them) so just WARN about this case for now. */
		if (IS_GEN7(dev)) {
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
				PF_PIPE_SEL_IVB(crtc->pipe));
		}
	}
}
5936
 
3746 Serge 5937
/*
 * Read the current hardware state of an Ironlake-family pipe into
 * pipe_config. Returns false if the pipe is disabled; otherwise fills
 * in bpp, PCH encoder/FDI state, the shared DPLL in use, pixel
 * multiplier, pipe timings and panel fitter state.
 */
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	/* On ILK the CPU transcoder always matches the pipe. */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	/* Decode the BPC field back into total bits per pixel. */
	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;

		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);

		if (HAS_PCH_IBX(dev_priv->dev)) {
			/* IBX: fixed 1:1 pipe-to-PLL mapping. */
			pipe_config->shared_dpll =
				(enum intel_dpll_id) crtc->pipe;
		} else {
			/* CPT: the PLL selection is routed through
			 * PCH_DPLL_SEL. */
			tmp = I915_READ(PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
			else
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
		}

		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];

		WARN_ON(!pll->get_hw_state(dev_priv, pll,
					   &pipe_config->dpll_hw_state));

		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);

	ironlake_get_pfit_config(crtc, pipe_config);

	return true;
}
6009
 
4104 Serge 6010
/*
 * Sanity-check that nothing which depends on the LCPLL is still active
 * before we shut it down: no enabled CRTCs, power well off, no DDI
 * PLLs, panel power and all PWM backlights off, utility pin and GTC
 * off, and only the PCH-event interrupt unmasked. Each violation is a
 * WARN, not a hard failure.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
	struct intel_crtc *crtc;
	unsigned long irqflags;
	uint32_t val;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
		WARN(crtc->active, "CRTC for pipe %c enabled\n",
		     pipe_name(crtc->pipe));

	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
	WARN(plls->spll_refcount, "SPLL enabled\n");
	WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n");
	WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n");
	WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
	WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
	     "CPU PWM1 enabled\n");
	WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
	     "CPU PWM2 enabled\n");
	WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
	     "PCH PWM1 enabled\n");
	WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
	     "Utility pin enabled\n");
	WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/* Interrupt masks are touched under the irq lock elsewhere, so
	 * take it while inspecting them. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	val = I915_READ(DEIMR);
	WARN((val & ~DE_PCH_EVENT_IVB) != val,
	     "Unexpected DEIMR bits enabled: 0x%x\n", val);
	val = I915_READ(SDEIMR);
	WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff,
	     "Unexpected SDEIMR bits enabled: 0x%x\n", val);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
6046
 
6047
/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
		       bool switch_to_fclk, bool allow_power_down)
{
	uint32_t val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	if (switch_to_fclk) {
		/* Move the CD clock onto FCLK before the PLL goes away. */
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
				       LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		val = I915_READ(LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	/* Shut down the display voltage comparator. */
	val = I915_READ(D_COMP);
	val |= D_COMP_COMP_DISABLE;
	I915_WRITE(D_COMP, val);
	POSTING_READ(D_COMP);
	udelay(100);

	if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		/* Let the hardware power the PLL domain down entirely. */
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}
6098
 
6099
/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	val = I915_READ(LCPLL_CTL);

	/* Nothing to do if the PLL is already locked and fully enabled. */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/* Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine! */
	gen6_gt_force_wake_get(dev_priv);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	/* Re-enable the voltage comparator, forcing a comparison. */
	val = I915_READ(D_COMP);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	I915_WRITE(D_COMP, val);
	POSTING_READ(D_COMP);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
		DRM_ERROR("LCPLL not locked yet\n");

	if (val & LCPLL_CD_SOURCE_FCLK) {
		/* Move the CD clock back from FCLK to LCPLL. */
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
					LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	gen6_gt_force_wake_put(dev_priv);
}
6148
 
6149
/*
 * Delayed-work handler that actually enters package C8+: disables the
 * LP PCH partition level, shuts down CLKOUT_DP, disables interrupts
 * and finally the LCPLL. Scheduled from __hsw_enable_package_c8() once
 * the disable refcount drops to zero.
 */
void hsw_enable_pc8_work(struct work_struct *__work)
{
	struct drm_i915_private *dev_priv =
		container_of(to_delayed_work(__work), struct drm_i915_private,
			     pc8.enable_work);
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	if (dev_priv->pc8.enabled)
		return;

	DRM_DEBUG_KMS("Enabling package C8+\n");

	dev_priv->pc8.enabled = true;

	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev);
	hsw_pc8_disable_interrupts(dev);
	hsw_disable_lcpll(dev_priv, true, true);
}
6174
 
6175
static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv)
6176
{
6177
	WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
6178
	WARN(dev_priv->pc8.disable_count < 1,
6179
	     "pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
6180
 
6181
	dev_priv->pc8.disable_count--;
6182
	if (dev_priv->pc8.disable_count != 0)
6183
		return;
6184
 
6185
	schedule_delayed_work(&dev_priv->pc8.enable_work,
6186
			      msecs_to_jiffies(i915_pc8_timeout));
6187
}
6188
 
6189
/*
 * Take one reference on the PC8 disable count; on the 0->1 transition,
 * cancel any pending enable work and, if PC8 is currently active,
 * bring the hardware fully back up (LCPLL, interrupts, refclk, PCH
 * partition level, DDI buffers, swizzling, ring frequencies).
 * Caller must hold pc8.lock.
 */
static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
	WARN(dev_priv->pc8.disable_count < 0,
	     "pc8.disable_count: %d\n", dev_priv->pc8.disable_count);

	dev_priv->pc8.disable_count++;
	if (dev_priv->pc8.disable_count != 1)
		return;

	cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
	if (!dev_priv->pc8.enabled)
		return;

	DRM_DEBUG_KMS("Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	hsw_pc8_restore_interrupts(dev);
	lpt_init_pch_refclk(dev);

	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	intel_prepare_ddi(dev);
	i915_gem_init_swizzling(dev);
	mutex_lock(&dev_priv->rps.hw_lock);
	gen6_update_ring_freq(dev);
	mutex_unlock(&dev_priv->rps.hw_lock);
	dev_priv->pc8.enabled = false;
}
6225
 
6226
void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
6227
{
6228
	mutex_lock(&dev_priv->pc8.lock);
6229
	__hsw_enable_package_c8(dev_priv);
6230
	mutex_unlock(&dev_priv->pc8.lock);
6231
}
6232
 
6233
void hsw_disable_package_c8(struct drm_i915_private *dev_priv)
6234
{
6235
	mutex_lock(&dev_priv->pc8.lock);
6236
	__hsw_disable_package_c8(dev_priv);
6237
	mutex_unlock(&dev_priv->pc8.lock);
6238
}
6239
 
6240
static bool hsw_can_enable_package_c8(struct drm_i915_private *dev_priv)
6241
{
6242
	struct drm_device *dev = dev_priv->dev;
6243
	struct intel_crtc *crtc;
6244
	uint32_t val;
6245
 
6246
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
6247
		if (crtc->base.enabled)
6248
			return false;
6249
 
6250
	/* This case is still possible since we have the i915.disable_power_well
6251
	 * parameter and also the KVMr or something else might be requesting the
6252
	 * power well. */
6253
	val = I915_READ(HSW_PWR_WELL_DRIVER);
6254
	if (val != 0) {
6255
		DRM_DEBUG_KMS("Not enabling PC8: power well on\n");
6256
		return false;
6257
	}
6258
 
6259
	return true;
6260
}
6261
 
6262
/* Since we're called from modeset_global_resources there's no way to
6263
 * symmetrically increase and decrease the refcount, so we use
6264
 * dev_priv->pc8.requirements_met to track whether we already have the refcount
6265
 * or not.
6266
 */
6267
static void hsw_update_package_c8(struct drm_device *dev)
6268
{
6269
	struct drm_i915_private *dev_priv = dev->dev_private;
6270
	bool allow;
6271
 
6272
	if (!i915_enable_pc8)
6273
		return;
6274
 
6275
	mutex_lock(&dev_priv->pc8.lock);
6276
 
6277
	allow = hsw_can_enable_package_c8(dev_priv);
6278
 
6279
	if (allow == dev_priv->pc8.requirements_met)
6280
		goto done;
6281
 
6282
	dev_priv->pc8.requirements_met = allow;
6283
 
6284
	if (allow)
6285
		__hsw_enable_package_c8(dev_priv);
6286
	else
6287
		__hsw_disable_package_c8(dev_priv);
6288
 
6289
done:
6290
	mutex_unlock(&dev_priv->pc8.lock);
6291
}
6292
 
6293
static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
6294
{
6295
	if (!dev_priv->pc8.gpu_idle) {
6296
		dev_priv->pc8.gpu_idle = true;
6297
		hsw_enable_package_c8(dev_priv);
6298
	}
6299
}
6300
 
6301
static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
6302
{
6303
	if (dev_priv->pc8.gpu_idle) {
6304
		dev_priv->pc8.gpu_idle = false;
6305
		hsw_disable_package_c8(dev_priv);
6306
	}
6307
}
6308
 
3480 Serge 6309
static void haswell_modeset_global_resources(struct drm_device *dev)
6310
{
6311
	bool enable = false;
6312
	struct intel_crtc *crtc;
6313
 
6314
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
4104 Serge 6315
		if (!crtc->base.enabled)
6316
			continue;
3480 Serge 6317
 
4104 Serge 6318
		if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.enabled ||
6319
		    crtc->config.cpu_transcoder != TRANSCODER_EDP)
3480 Serge 6320
			enable = true;
6321
	}
6322
 
4104 Serge 6323
	intel_set_power_well(dev, enable);
3480 Serge 6324
 
4104 Serge 6325
	hsw_update_package_c8(dev);
3480 Serge 6326
}
6327
 
3243 Serge 6328
static int haswell_crtc_mode_set(struct drm_crtc *crtc,
6329
				 int x, int y,
6330
				 struct drm_framebuffer *fb)
6331
{
6332
	struct drm_device *dev = crtc->dev;
6333
	struct drm_i915_private *dev_priv = dev->dev_private;
6334
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6335
	int plane = intel_crtc->plane;
6336
	int ret;
6337
 
4104 Serge 6338
	if (!intel_ddi_pll_mode_set(crtc))
3243 Serge 6339
		return -EINVAL;
6340
 
3746 Serge 6341
	if (intel_crtc->config.has_dp_encoder)
6342
		intel_dp_set_m_n(intel_crtc);
2327 Serge 6343
 
3243 Serge 6344
	intel_crtc->lowfreq_avail = false;
2327 Serge 6345
 
4104 Serge 6346
	intel_set_pipe_timings(intel_crtc);
3243 Serge 6347
 
4104 Serge 6348
	if (intel_crtc->config.has_pch_encoder) {
6349
		intel_cpu_transcoder_set_m_n(intel_crtc,
6350
					     &intel_crtc->config.fdi_m_n);
6351
	}
3243 Serge 6352
 
4104 Serge 6353
	haswell_set_pipeconf(crtc);
2327 Serge 6354
 
3746 Serge 6355
	intel_set_pipe_csc(crtc);
3480 Serge 6356
 
3031 serge 6357
	/* Set up the display plane register */
3480 Serge 6358
	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
2327 Serge 6359
    POSTING_READ(DSPCNTR(plane));
6360
 
3031 serge 6361
	ret = intel_pipe_set_base(crtc, x, y, fb);
2327 Serge 6362
 
6363
    intel_update_watermarks(dev);
6364
 
6365
    return ret;
6366
}
6367
 
3746 Serge 6368
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
6369
				    struct intel_crtc_config *pipe_config)
6370
{
6371
	struct drm_device *dev = crtc->base.dev;
6372
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 6373
	enum intel_display_power_domain pfit_domain;
3746 Serge 6374
	uint32_t tmp;
6375
 
4104 Serge 6376
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
6377
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
6378
 
6379
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
6380
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
6381
		enum pipe trans_edp_pipe;
6382
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
6383
		default:
6384
			WARN(1, "unknown pipe linked to edp transcoder\n");
6385
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
6386
		case TRANS_DDI_EDP_INPUT_A_ON:
6387
			trans_edp_pipe = PIPE_A;
6388
			break;
6389
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
6390
			trans_edp_pipe = PIPE_B;
6391
			break;
6392
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
6393
			trans_edp_pipe = PIPE_C;
6394
			break;
6395
		}
6396
 
6397
		if (trans_edp_pipe == crtc->pipe)
6398
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
6399
	}
6400
 
6401
	if (!intel_display_power_enabled(dev,
6402
			POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
6403
		return false;
6404
 
6405
	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
3746 Serge 6406
	if (!(tmp & PIPECONF_ENABLE))
6407
		return false;
6408
 
6409
	/*
4104 Serge 6410
	 * Haswell has only FDI/PCH transcoder A. It is which is connected to
3746 Serge 6411
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
6412
	 * the PCH transcoder is on.
6413
	 */
4104 Serge 6414
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
3746 Serge 6415
	if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
4104 Serge 6416
	    I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
3746 Serge 6417
		pipe_config->has_pch_encoder = true;
6418
 
4104 Serge 6419
		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
6420
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
6421
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
3746 Serge 6422
 
4104 Serge 6423
		ironlake_get_fdi_m_n_config(crtc, pipe_config);
6424
	}
6425
 
6426
	intel_get_pipe_timings(crtc, pipe_config);
6427
 
6428
	pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
6429
	if (intel_display_power_enabled(dev, pfit_domain))
6430
		ironlake_get_pfit_config(crtc, pipe_config);
6431
 
6432
	pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
6433
				   (I915_READ(IPS_CTL) & IPS_ENABLE);
6434
 
6435
	pipe_config->pixel_multiplier = 1;
6436
 
3746 Serge 6437
	return true;
6438
}
6439
 
2330 Serge 6440
static int intel_crtc_mode_set(struct drm_crtc *crtc,
6441
			       int x, int y,
3031 serge 6442
			       struct drm_framebuffer *fb)
2330 Serge 6443
{
6444
	struct drm_device *dev = crtc->dev;
6445
	struct drm_i915_private *dev_priv = dev->dev_private;
3243 Serge 6446
	struct intel_encoder *encoder;
2330 Serge 6447
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3746 Serge 6448
	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
2330 Serge 6449
	int pipe = intel_crtc->pipe;
6450
	int ret;
2327 Serge 6451
 
3031 serge 6452
	drm_vblank_pre_modeset(dev, pipe);
2327 Serge 6453
 
3746 Serge 6454
	ret = dev_priv->display.crtc_mode_set(crtc, x, y, fb);
6455
 
3031 serge 6456
	drm_vblank_post_modeset(dev, pipe);
2327 Serge 6457
 
3243 Serge 6458
	if (ret != 0)
2330 Serge 6459
	return ret;
3243 Serge 6460
 
6461
	for_each_encoder_on_crtc(dev, crtc, encoder) {
6462
		DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
6463
			encoder->base.base.id,
6464
			drm_get_encoder_name(&encoder->base),
6465
			mode->base.id, mode->name);
3746 Serge 6466
			encoder->mode_set(encoder);
3243 Serge 6467
	}
6468
 
6469
	return 0;
2330 Serge 6470
}
2327 Serge 6471
 
2342 Serge 6472
/*
 * Check whether the ELD already programmed into the audio hardware matches
 * the connector's current ELD, so a rewrite can be skipped.
 *
 * Side effect: resets the ELD read address (bits_elda in reg_elda) before
 * reading back, so the compare loop starts from dword 0.
 */
static bool intel_eld_uptodate(struct drm_connector *connector,
			       int reg_eldv, uint32_t bits_eldv,
			       int reg_elda, uint32_t bits_elda,
			       int reg_edid)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t i;

	i = I915_READ(reg_eldv) & bits_eldv;

	/* No ELD to program: up to date iff hardware has none valid either. */
	if (!eld[0])
		return !i;

	/* Hardware has no valid ELD but we have one: stale. */
	if (!i)
		return false;

	/* Reset the ELD access address, then compare dword by dword.
	 * eld[2] holds the ELD length in dwords. */
	i = I915_READ(reg_elda);
	i &= ~bits_elda;
	I915_WRITE(reg_elda, i);

	for (i = 0; i < eld[2]; i++)
		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
			return false;

	return true;
}
6500
 
6501
static void g4x_write_eld(struct drm_connector *connector,
6502
			  struct drm_crtc *crtc)
6503
{
6504
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
6505
	uint8_t *eld = connector->eld;
6506
	uint32_t eldv;
6507
	uint32_t len;
6508
	uint32_t i;
6509
 
6510
	i = I915_READ(G4X_AUD_VID_DID);
6511
 
6512
	if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
6513
		eldv = G4X_ELDV_DEVCL_DEVBLC;
6514
	else
6515
		eldv = G4X_ELDV_DEVCTG;
6516
 
6517
	if (intel_eld_uptodate(connector,
6518
			       G4X_AUD_CNTL_ST, eldv,
6519
			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
6520
			       G4X_HDMIW_HDMIEDID))
6521
		return;
6522
 
6523
	i = I915_READ(G4X_AUD_CNTL_ST);
6524
	i &= ~(eldv | G4X_ELD_ADDR);
6525
	len = (i >> 9) & 0x1f;		/* ELD buffer size */
6526
	I915_WRITE(G4X_AUD_CNTL_ST, i);
6527
 
6528
	if (!eld[0])
6529
		return;
6530
 
6531
	len = min_t(uint8_t, eld[2], len);
6532
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
6533
	for (i = 0; i < len; i++)
6534
		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
6535
 
6536
	i = I915_READ(G4X_AUD_CNTL_ST);
6537
	i |= eldv;
6538
	I915_WRITE(G4X_AUD_CNTL_ST, i);
6539
}
6540
 
3031 serge 6541
static void haswell_write_eld(struct drm_connector *connector,
6542
				     struct drm_crtc *crtc)
6543
{
6544
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
6545
	uint8_t *eld = connector->eld;
6546
	struct drm_device *dev = crtc->dev;
3480 Serge 6547
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031 serge 6548
	uint32_t eldv;
6549
	uint32_t i;
6550
	int len;
6551
	int pipe = to_intel_crtc(crtc)->pipe;
6552
	int tmp;
6553
 
6554
	int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
6555
	int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
6556
	int aud_config = HSW_AUD_CFG(pipe);
6557
	int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
6558
 
6559
 
6560
	DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n");
6561
 
6562
	/* Audio output enable */
6563
	DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
6564
	tmp = I915_READ(aud_cntrl_st2);
6565
	tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
6566
	I915_WRITE(aud_cntrl_st2, tmp);
6567
 
6568
	/* Wait for 1 vertical blank */
6569
	intel_wait_for_vblank(dev, pipe);
6570
 
6571
	/* Set ELD valid state */
6572
	tmp = I915_READ(aud_cntrl_st2);
4104 Serge 6573
	DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
3031 serge 6574
	tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
6575
	I915_WRITE(aud_cntrl_st2, tmp);
6576
	tmp = I915_READ(aud_cntrl_st2);
4104 Serge 6577
	DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);
3031 serge 6578
 
6579
	/* Enable HDMI mode */
6580
	tmp = I915_READ(aud_config);
4104 Serge 6581
	DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
3031 serge 6582
	/* clear N_programing_enable and N_value_index */
6583
	tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
6584
	I915_WRITE(aud_config, tmp);
6585
 
6586
	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
6587
 
6588
	eldv = AUDIO_ELD_VALID_A << (pipe * 4);
3480 Serge 6589
	intel_crtc->eld_vld = true;
3031 serge 6590
 
6591
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
6592
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
6593
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
6594
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
6595
	} else
6596
		I915_WRITE(aud_config, 0);
6597
 
6598
	if (intel_eld_uptodate(connector,
6599
			       aud_cntrl_st2, eldv,
6600
			       aud_cntl_st, IBX_ELD_ADDRESS,
6601
			       hdmiw_hdmiedid))
6602
		return;
6603
 
6604
	i = I915_READ(aud_cntrl_st2);
6605
	i &= ~eldv;
6606
	I915_WRITE(aud_cntrl_st2, i);
6607
 
6608
	if (!eld[0])
6609
		return;
6610
 
6611
	i = I915_READ(aud_cntl_st);
6612
	i &= ~IBX_ELD_ADDRESS;
6613
	I915_WRITE(aud_cntl_st, i);
6614
	i = (i >> 29) & DIP_PORT_SEL_MASK;		/* DIP_Port_Select, 0x1 = PortB */
6615
	DRM_DEBUG_DRIVER("port num:%d\n", i);
6616
 
6617
	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
6618
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
6619
	for (i = 0; i < len; i++)
6620
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
6621
 
6622
	i = I915_READ(aud_cntrl_st2);
6623
	i |= eldv;
6624
	I915_WRITE(aud_cntrl_st2, i);
6625
 
6626
}
6627
 
2342 Serge 6628
static void ironlake_write_eld(struct drm_connector *connector,
6629
				     struct drm_crtc *crtc)
6630
{
6631
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
6632
	uint8_t *eld = connector->eld;
6633
	uint32_t eldv;
6634
	uint32_t i;
6635
	int len;
6636
	int hdmiw_hdmiedid;
3031 serge 6637
	int aud_config;
2342 Serge 6638
	int aud_cntl_st;
6639
	int aud_cntrl_st2;
3031 serge 6640
	int pipe = to_intel_crtc(crtc)->pipe;
2342 Serge 6641
 
6642
	if (HAS_PCH_IBX(connector->dev)) {
3031 serge 6643
		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
6644
		aud_config = IBX_AUD_CFG(pipe);
6645
		aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
2342 Serge 6646
		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
6647
	} else {
3031 serge 6648
		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
6649
		aud_config = CPT_AUD_CFG(pipe);
6650
		aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
2342 Serge 6651
		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
6652
	}
6653
 
3031 serge 6654
	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
2342 Serge 6655
 
6656
	i = I915_READ(aud_cntl_st);
3031 serge 6657
	i = (i >> 29) & DIP_PORT_SEL_MASK;		/* DIP_Port_Select, 0x1 = PortB */
2342 Serge 6658
	if (!i) {
6659
		DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
6660
		/* operate blindly on all ports */
6661
		eldv = IBX_ELD_VALIDB;
6662
		eldv |= IBX_ELD_VALIDB << 4;
6663
		eldv |= IBX_ELD_VALIDB << 8;
6664
	} else {
4104 Serge 6665
		DRM_DEBUG_DRIVER("ELD on port %c\n", port_name(i));
2342 Serge 6666
		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
6667
	}
6668
 
6669
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
6670
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
6671
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
3031 serge 6672
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
6673
	} else
6674
		I915_WRITE(aud_config, 0);
2342 Serge 6675
 
6676
	if (intel_eld_uptodate(connector,
6677
			       aud_cntrl_st2, eldv,
6678
			       aud_cntl_st, IBX_ELD_ADDRESS,
6679
			       hdmiw_hdmiedid))
6680
		return;
6681
 
6682
	i = I915_READ(aud_cntrl_st2);
6683
	i &= ~eldv;
6684
	I915_WRITE(aud_cntrl_st2, i);
6685
 
6686
	if (!eld[0])
6687
		return;
6688
 
6689
	i = I915_READ(aud_cntl_st);
6690
	i &= ~IBX_ELD_ADDRESS;
6691
	I915_WRITE(aud_cntl_st, i);
6692
 
6693
	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
6694
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
6695
	for (i = 0; i < len; i++)
6696
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
6697
 
6698
	i = I915_READ(aud_cntrl_st2);
6699
	i |= eldv;
6700
	I915_WRITE(aud_cntrl_st2, i);
6701
}
6702
 
6703
void intel_write_eld(struct drm_encoder *encoder,
6704
		     struct drm_display_mode *mode)
6705
{
6706
	struct drm_crtc *crtc = encoder->crtc;
6707
	struct drm_connector *connector;
6708
	struct drm_device *dev = encoder->dev;
6709
	struct drm_i915_private *dev_priv = dev->dev_private;
6710
 
6711
	connector = drm_select_eld(encoder, mode);
6712
	if (!connector)
6713
		return;
6714
 
6715
	DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6716
			 connector->base.id,
6717
			 drm_get_connector_name(connector),
6718
			 connector->encoder->base.id,
6719
			 drm_get_encoder_name(connector->encoder));
6720
 
6721
	connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
6722
 
6723
	if (dev_priv->display.write_eld)
6724
		dev_priv->display.write_eld(connector, crtc);
6725
}
6726
 
2327 Serge 6727
/** Loads the palette/gamma unit for the CRTC with the prepared values */
6728
void intel_crtc_load_lut(struct drm_crtc *crtc)
6729
{
6730
	struct drm_device *dev = crtc->dev;
6731
	struct drm_i915_private *dev_priv = dev->dev_private;
6732
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4104 Serge 6733
	enum pipe pipe = intel_crtc->pipe;
6734
	int palreg = PALETTE(pipe);
2327 Serge 6735
	int i;
4104 Serge 6736
	bool reenable_ips = false;
2327 Serge 6737
 
6738
	/* The clocks have to be on to load the palette. */
3031 serge 6739
	if (!crtc->enabled || !intel_crtc->active)
2327 Serge 6740
		return;
6741
 
4104 Serge 6742
	if (!HAS_PCH_SPLIT(dev_priv->dev))
6743
		assert_pll_enabled(dev_priv, pipe);
6744
 
2327 Serge 6745
	/* use legacy palette for Ironlake */
6746
	if (HAS_PCH_SPLIT(dev))
4104 Serge 6747
		palreg = LGC_PALETTE(pipe);
2327 Serge 6748
 
4104 Serge 6749
	/* Workaround : Do not read or write the pipe palette/gamma data while
6750
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6751
	 */
6752
	if (intel_crtc->config.ips_enabled &&
6753
	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
6754
	     GAMMA_MODE_MODE_SPLIT)) {
6755
		hsw_disable_ips(intel_crtc);
6756
		reenable_ips = true;
6757
	}
6758
 
2327 Serge 6759
	for (i = 0; i < 256; i++) {
6760
		I915_WRITE(palreg + 4 * i,
6761
			   (intel_crtc->lut_r[i] << 16) |
6762
			   (intel_crtc->lut_g[i] << 8) |
6763
			   intel_crtc->lut_b[i]);
6764
	}
4104 Serge 6765
 
6766
	if (reenable_ips)
6767
		hsw_enable_ips(intel_crtc);
2327 Serge 6768
}
6769
 
3031 serge 6770
#if 0
/* i845: show/hide the hardware cursor at the given GTT base (0 = hide).
 * Compiled out in this port (no cursor ioctl path). */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool visible = base != 0;
	u32 cntl;

	if (intel_crtc->cursor_visible == visible)
		return;

	cntl = I915_READ(_CURACNTR);
	if (visible) {
		/* On these chipsets we can only modify the base whilst
		 * the cursor is disabled.
		 */
		I915_WRITE(_CURABASE, base);

		cntl &= ~(CURSOR_FORMAT_MASK);
		/* XXX width must be 64, stride 256 => 0x00 << 28 */
		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB;
	} else {
		cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
	}
	I915_WRITE(_CURACNTR, cntl);

	intel_crtc->cursor_visible = visible;
}
#endif
2327 Serge 6801
 
3031 serge 6802
/* i9xx: show/hide the hardware cursor; base (0 = hide) latches on the next
 * vblank via the CURBASE write. */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool visible = base != 0;

	if (intel_crtc->cursor_visible != visible) {
		uint32_t cntl = I915_READ(CURCNTR(pipe));

		if (base) {
			cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
			cntl |= pipe << 28; /* Connect to correct pipe */
		} else {
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
			cntl |= CURSOR_MODE_DISABLE;
		}
		I915_WRITE(CURCNTR(pipe), cntl);

		intel_crtc->cursor_visible = visible;
	}

	/* and commit changes on next vblank */
	POSTING_READ(CURCNTR(pipe));
	I915_WRITE(CURBASE(pipe), base);
	POSTING_READ(CURBASE(pipe));
}
2327 Serge 6829
 
3031 serge 6830
/* Ivybridge/Haswell: show/hide the hardware cursor; base (0 = hide) latches
 * on the next vblank via the CURBASE_IVB write. */
static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool visible = base != 0;

	if (intel_crtc->cursor_visible != visible) {
		uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));

		if (base) {
			cntl &= ~CURSOR_MODE;
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
		} else {
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
			cntl |= CURSOR_MODE_DISABLE;
		}
		/* Haswell routes the cursor through the pipe CSC. */
		if (IS_HASWELL(dev)) {
			cntl |= CURSOR_PIPE_CSC_ENABLE;
			cntl &= ~CURSOR_TRICKLE_FEED_DISABLE;
		}
		I915_WRITE(CURCNTR_IVB(pipe), cntl);

		intel_crtc->cursor_visible = visible;
	}

	/* and commit changes on next vblank */
	POSTING_READ(CURCNTR_IVB(pipe));
	I915_WRITE(CURBASE_IVB(pipe), base);
	POSTING_READ(CURBASE_IVB(pipe));
}
2327 Serge 6860
 
3031 serge 6861
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
6862
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
6863
				     bool on)
6864
{
6865
	struct drm_device *dev = crtc->dev;
6866
	struct drm_i915_private *dev_priv = dev->dev_private;
6867
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6868
	int pipe = intel_crtc->pipe;
6869
	int x = intel_crtc->cursor_x;
6870
	int y = intel_crtc->cursor_y;
6871
	u32 base, pos;
6872
	bool visible;
2327 Serge 6873
 
3031 serge 6874
	pos = 0;
2327 Serge 6875
 
3031 serge 6876
	if (on && crtc->enabled && crtc->fb) {
6877
		base = intel_crtc->cursor_addr;
6878
		if (x > (int) crtc->fb->width)
6879
			base = 0;
2327 Serge 6880
 
3031 serge 6881
		if (y > (int) crtc->fb->height)
6882
			base = 0;
6883
	} else
6884
		base = 0;
2327 Serge 6885
 
3031 serge 6886
	if (x < 0) {
6887
		if (x + intel_crtc->cursor_width < 0)
6888
			base = 0;
2327 Serge 6889
 
3031 serge 6890
		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
6891
		x = -x;
6892
	}
6893
	pos |= x << CURSOR_X_SHIFT;
2327 Serge 6894
 
3031 serge 6895
	if (y < 0) {
6896
		if (y + intel_crtc->cursor_height < 0)
6897
			base = 0;
2327 Serge 6898
 
3031 serge 6899
		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
6900
		y = -y;
6901
	}
6902
	pos |= y << CURSOR_Y_SHIFT;
2327 Serge 6903
 
3031 serge 6904
	visible = base != 0;
6905
	if (!visible && !intel_crtc->cursor_visible)
6906
		return;
2327 Serge 6907
 
3031 serge 6908
	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
6909
		I915_WRITE(CURPOS_IVB(pipe), pos);
6910
		ivb_update_cursor(crtc, base);
6911
	} else {
6912
		I915_WRITE(CURPOS(pipe), pos);
6913
			i9xx_update_cursor(crtc, base);
6914
	}
6915
}
2327 Serge 6916
 
4557 Serge 6917
#if 0
/*
 * Cursor-set ioctl backend: pin (or attach as a phys object) the GEM buffer
 * named by @handle and program it as the 64x64 ARGB hardware cursor; a zero
 * handle turns the cursor off.  Releases the previously installed cursor BO.
 * Compiled out in this port.
 *
 * Fix: the size-check error message read "buffer is to small".
 * NOTE(review): that path also returns -ENOMEM although nothing failed to
 * allocate (-ENOSPC would better describe "buffer too small"); kept as-is
 * since callers may depend on the errno.
 */
static int intel_crtc_cursor_set(struct drm_crtc *crtc,
				 struct drm_file *file,
				 uint32_t handle,
				 uint32_t width, uint32_t height)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	uint32_t addr;
	int ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!handle) {
		DRM_DEBUG_KMS("cursor off\n");
		addr = 0;
		obj = NULL;
		mutex_lock(&dev->struct_mutex);
		goto finish;
	}

	/* Currently we only support 64x64 cursors */
	if (width != 64 || height != 64) {
		DRM_ERROR("we currently only support 64x64 cursors\n");
		return -EINVAL;
	}

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
	if (&obj->base == NULL)
		return -ENOENT;

	if (obj->base.size < width * height * 4) {
		DRM_ERROR("buffer is too small\n");
		ret = -ENOMEM;
		goto fail;
	}

	/* we only need to pin inside GTT if cursor is non-phy */
	mutex_lock(&dev->struct_mutex);
	if (!dev_priv->info->cursor_needs_physical) {
		unsigned alignment;

		if (obj->tiling_mode) {
			DRM_ERROR("cursor cannot be tiled\n");
			ret = -EINVAL;
			goto fail_locked;
		}

		/* Note that the w/a also requires 2 PTE of padding following
		 * the bo. We currently fill all unused PTE with the shadow
		 * page and so we should always have valid PTE following the
		 * cursor preventing the VT-d warning.
		 */
		alignment = 0;
		if (need_vtd_wa(dev))
			alignment = 64*1024;

		ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
		if (ret) {
			DRM_ERROR("failed to move cursor bo into the GTT\n");
			goto fail_locked;
		}

		ret = i915_gem_object_put_fence(obj);
		if (ret) {
			DRM_ERROR("failed to release fence for cursor");
			goto fail_unpin;
		}

		addr = i915_gem_obj_ggtt_offset(obj);
	} else {
		int align = IS_I830(dev) ? 16 * 1024 : 256;
		ret = i915_gem_attach_phys_object(dev, obj,
						  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
						  align);
		if (ret) {
			DRM_ERROR("failed to attach phys object\n");
			goto fail_locked;
		}
		addr = obj->phys_obj->handle->busaddr;
	}

	if (IS_GEN2(dev))
		I915_WRITE(CURSIZE, (height << 12) | width);

 finish:
	/* Release the old cursor BO, if any. */
	if (intel_crtc->cursor_bo) {
		if (dev_priv->info->cursor_needs_physical) {
			if (intel_crtc->cursor_bo != obj)
				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
		} else
			i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
	}

	mutex_unlock(&dev->struct_mutex);

	intel_crtc->cursor_addr = addr;
	intel_crtc->cursor_bo = obj;
	intel_crtc->cursor_width = width;
	intel_crtc->cursor_height = height;

	if (intel_crtc->active)
		intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);

	return 0;
fail_unpin:
	i915_gem_object_unpin_from_display_plane(obj);
fail_locked:
	mutex_unlock(&dev->struct_mutex);
fail:
	drm_gem_object_unreference_unlocked(&obj->base);
	return ret;
}
#endif
2327 Serge 7033
 
3031 serge 7034
static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
7035
{
7036
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7037
 
7038
	intel_crtc->cursor_x = x;
7039
	intel_crtc->cursor_y = y;
7040
 
4104 Serge 7041
	if (intel_crtc->active)
7042
		intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
3031 serge 7043
 
7044
	return 0;
7045
}
7046
 
2332 Serge 7047
/** Sets the color ramps on behalf of RandR */
7048
void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
7049
				 u16 blue, int regno)
7050
{
7051
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2327 Serge 7052
 
2332 Serge 7053
	intel_crtc->lut_r[regno] = red >> 8;
7054
	intel_crtc->lut_g[regno] = green >> 8;
7055
	intel_crtc->lut_b[regno] = blue >> 8;
7056
}
2327 Serge 7057
 
2332 Serge 7058
/* Read back one gamma LUT entry, widening the stored 8-bit components into
 * the high byte of each 16-bit output. */
void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
			     u16 *blue, int regno)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	*red = intel_crtc->lut_r[regno] << 8;
	*green = intel_crtc->lut_g[regno] << 8;
	*blue = intel_crtc->lut_b[regno] << 8;
}
2327 Serge 7067
 
2330 Serge 7068
/* DRM gamma_set hook: copy the [start, start+size) slice of the 16-bit ramps
 * into the 8-bit LUT (clamped to 256 entries) and reload the hardware. */
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				 u16 *blue, uint32_t start, uint32_t size)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int end = (start + size > 256) ? 256 : start + size;
	int i;

	for (i = start; i < end; i++) {
		intel_crtc->lut_r[i] = red[i] >> 8;
		intel_crtc->lut_g[i] = green[i] >> 8;
		intel_crtc->lut_b[i] = blue[i] >> 8;
	}

	intel_crtc_load_lut(crtc);
}
2327 Serge 7082
 
2330 Serge 7083
/* VESA 640x480x72Hz mode to set on the pipe */
7084
static struct drm_display_mode load_detect_mode = {
7085
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
7086
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
7087
};
2327 Serge 7088
 
3031 serge 7089
static struct drm_framebuffer *
7090
intel_framebuffer_create(struct drm_device *dev,
7091
			 struct drm_mode_fb_cmd2 *mode_cmd,
7092
			 struct drm_i915_gem_object *obj)
7093
{
7094
	struct intel_framebuffer *intel_fb;
7095
	int ret;
2327 Serge 7096
 
3031 serge 7097
	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
7098
	if (!intel_fb) {
7099
		drm_gem_object_unreference_unlocked(&obj->base);
7100
		return ERR_PTR(-ENOMEM);
7101
	}
2327 Serge 7102
 
3031 serge 7103
	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
7104
	if (ret) {
7105
		drm_gem_object_unreference_unlocked(&obj->base);
7106
		kfree(intel_fb);
7107
		return ERR_PTR(ret);
7108
	}
2327 Serge 7109
 
3031 serge 7110
	return &intel_fb->base;
7111
}
2327 Serge 7112
 
2330 Serge 7113
static u32
7114
intel_framebuffer_pitch_for_width(int width, int bpp)
7115
{
7116
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
7117
	return ALIGN(pitch, 64);
7118
}
2327 Serge 7119
 
2330 Serge 7120
static u32
7121
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
7122
{
7123
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
7124
	return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
7125
}
2327 Serge 7126
 
2330 Serge 7127
static struct drm_framebuffer *
7128
intel_framebuffer_create_for_mode(struct drm_device *dev,
7129
				  struct drm_display_mode *mode,
7130
				  int depth, int bpp)
7131
{
7132
	struct drm_i915_gem_object *obj;
3243 Serge 7133
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2327 Serge 7134
 
4104 Serge 7135
	return NULL;
2330 Serge 7136
}
2327 Serge 7137
 
2330 Serge 7138
static struct drm_framebuffer *
7139
mode_fits_in_fbdev(struct drm_device *dev,
7140
		   struct drm_display_mode *mode)
7141
{
7142
	struct drm_i915_private *dev_priv = dev->dev_private;
7143
	struct drm_i915_gem_object *obj;
7144
	struct drm_framebuffer *fb;
2327 Serge 7145
 
4280 Serge 7146
	if (dev_priv->fbdev == NULL)
7147
		return NULL;
2327 Serge 7148
 
4280 Serge 7149
	obj = dev_priv->fbdev->ifb.obj;
7150
	if (obj == NULL)
2330 Serge 7151
		return NULL;
2327 Serge 7152
 
4280 Serge 7153
	fb = &dev_priv->fbdev->ifb.base;
3031 serge 7154
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
7155
							       fb->bits_per_pixel))
4280 Serge 7156
		return NULL;
2327 Serge 7157
 
3031 serge 7158
	if (obj->base.size < mode->vdisplay * fb->pitches[0])
7159
		return NULL;
7160
 
4280 Serge 7161
	return fb;
2330 Serge 7162
}
2327 Serge 7163
 
3031 serge 7164
/*
 * Grab a pipe/CRTC for load-based connector detection and light it up.
 *
 * On success returns true with crtc->mutex HELD; the caller must later
 * call intel_release_load_detect_pipe() with the same @old state to
 * undo the temporary setup and drop the lock.  @old records what must
 * be restored (previous dpms mode, whether a temporary modeset was
 * done, and any temporary framebuffer to release).
 */
bool intel_get_load_detect_pipe(struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *fb;
	int i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		/* Lock is released by intel_release_load_detect_pipe(). */
		mutex_lock(&crtc->mutex);

		old->dpms_mode = connector->dpms;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (connector->dpms != DRM_MODE_DPMS_ON)
			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);

		return true;
	}

	/* Find an unused one (if possible) */
	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
		i++;
		/* Skip CRTCs this encoder cannot be routed to. */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (!possible_crtc->enabled) {
			crtc = possible_crtc;
			break;
		}
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		return false;
	}

	/* Lock is released by intel_release_load_detect_pipe(). */
	mutex_lock(&crtc->mutex);
	intel_encoder->new_crtc = to_intel_crtc(crtc);
	to_intel_connector(connector)->new_encoder = intel_encoder;

	intel_crtc = to_intel_crtc(crtc);
	old->dpms_mode = connector->dpms;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	if (!mode)
		mode = &load_detect_mode;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	fb = mode_fits_in_fbdev(dev, mode);
	if (fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
		old->release_fb = fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		mutex_unlock(&crtc->mutex);
		return false;
	}

	if (intel_set_mode(crtc, mode, 0, 0, fb)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		mutex_unlock(&crtc->mutex);
		return false;
	}

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return true;
}
2327 Serge 7271
 
3031 serge 7272
/*
 * Undo a successful intel_get_load_detect_pipe(): tear down any
 * temporary modeset, release the temporary framebuffer, restore the
 * previous dpms state, and drop the crtc->mutex acquired by the
 * matching get call.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = encoder->crtc;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	if (old->load_detect_temp) {
		/* Detach the temporary encoder/crtc routing and disable. */
		to_intel_connector(connector)->new_encoder = NULL;
		intel_encoder->new_crtc = NULL;
		intel_set_mode(crtc, NULL, 0, 0, NULL);

		if (old->release_fb) {
			drm_framebuffer_unregister_private(old->release_fb);
			drm_framebuffer_unreference(old->release_fb);
		}

		mutex_unlock(&crtc->mutex);
		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON)
		connector->funcs->dpms(connector, old->dpms_mode);

	mutex_unlock(&crtc->mutex);
}
2327 Serge 7304
 
2330 Serge 7305
/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = I915_READ(DPLL(pipe));
	u32 fp;
	intel_clock_t clock;

	/* Pick the FP register currently feeding the DPLL (FP1 is the
	 * downclocked variant selected via DISPLAY_RATE_SELECT_FPA1). */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = I915_READ(FP0(pipe));
	else
		fp = I915_READ(FP1(pipe));

	/* Decode the M/N dividers; Pineview encodes N differently. */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* P1 is stored as a one-hot bitfield; ffs() recovers it. */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			pipe_config->adjusted_mode.clock = 0;
			return;
		}

		/* 96 MHz reference clock assumed for gen3+. */
		if (IS_PINEVIEW(dev))
			pineview_clock(96000, &clock);
		else
			i9xx_clock(96000, &clock);
	} else {
		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
			clock.p2 = 14;

			if ((dpll & PLL_REF_INPUT_MASK) ==
			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
				/* XXX: might not be 66MHz */
				i9xx_clock(66000, &clock);
			} else
				i9xx_clock(48000, &clock);
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;

			i9xx_clock(48000, &clock);
		}
	}

	/* clock.dot was filled in by the *_clock() helper above. */
	pipe_config->adjusted_mode.clock = clock.dot;
}
7390
 
7391
/*
 * Read back the pixel clock of @crtc on PCH platforms from the
 * programmed link M/N values and the FDI/DP link frequency, storing the
 * result in pipe_config->adjusted_mode.clock.  Leaves the clock
 * untouched when the M/N registers read back as zero.
 */
static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
				    struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	int link_freq, repeat;
	u64 clock;
	u32 link_m, link_n;

	repeat = pipe_config->pixel_multiplier;

	/*
	 * The calculation for the data clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes * repeat))/bpp
	 * But we want to avoid losing precison if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes * repeat)/(n*bpp))
	 *
	 * and the link clock is simpler:
	 * link_clock = (m * link_clock * repeat) / n
	 */

	/*
	 * We need to get the FDI or DP link clock here to derive
	 * the M/N dividers.
	 *
	 * For FDI, we read it from the BIOS or use a fixed 2.7GHz.
	 * For DP, it's either 1.62GHz or 2.7GHz.
	 * We do our calculations in 10*MHz since we don't need much precison.
	 */
	if (pipe_config->has_pch_encoder)
		link_freq = intel_fdi_link_freq(dev) * 10000;
	else
		link_freq = pipe_config->port_clock;

	link_m = I915_READ(PIPE_LINK_M1(cpu_transcoder));
	link_n = I915_READ(PIPE_LINK_N1(cpu_transcoder));

	/* Transcoder not programmed yet: nothing sensible to report. */
	if (!link_m || !link_n)
		return;

	/* 64-bit math to avoid overflow before the division by link_n. */
	clock = ((u64)link_m * (u64)link_freq * (u64)repeat);
	do_div(clock, link_n);

	pipe_config->adjusted_mode.clock = clock;
}
2327 Serge 7437
 
2330 Serge 7438
/** Returns the currently programmed mode of the given pipe. */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
	struct drm_display_mode *mode;
	struct intel_crtc_config pipe_config;
	int htot = I915_READ(HTOTAL(cpu_transcoder));
	int hsync = I915_READ(HSYNC(cpu_transcoder));
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
	int vsync = I915_READ(VSYNC(cpu_transcoder));

	/* Freshly allocated mode; presumably the caller frees it — TODO
	 * confirm against callers outside this chunk. */
	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	/*
	 * Construct a pipe_config sufficient for getting the clock info
	 * back out of crtc_clock_get.
	 *
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
	 * to use a real value here instead.
	 */
	pipe_config.cpu_transcoder = (enum transcoder) intel_crtc->pipe;
	pipe_config.pixel_multiplier = 1;
	i9xx_crtc_clock_get(intel_crtc, &pipe_config);

	/* Hardware timing registers store value-minus-one in each
	 * half-word: low 16 bits = active/start, high 16 bits = total/end. */
	mode->clock = pipe_config.adjusted_mode.clock;
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);

	return mode;
}
7481
 
2327 Serge 7482
/*
 * Switch an LVDS pipe back from the downclocked (FPA1) divider to the
 * full-rate one.  No-op on PCH platforms or when LVDS downclocking is
 * unavailable.
 */
static void intel_increase_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int dpll_reg = DPLL(pipe);
	int dpll;

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	dpll = I915_READ(dpll_reg);
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
		DRM_DEBUG_DRIVER("upclocking LVDS\n");

		/* DPLL writes require the panel power registers unlocked. */
		assert_panel_unlocked(dev_priv, pipe);

		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		/* Let the change take effect over one vblank, then verify. */
		intel_wait_for_vblank(dev, pipe);

		dpll = I915_READ(dpll_reg);
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
	}
}
7512
 
3031 serge 7513
/*
 * Switch an idle LVDS pipe onto the downclocked (FPA1) divider to save
 * power.  No-op on PCH platforms or when LVDS downclocking is
 * unavailable.
 */
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	/*
	 * Since this is called by a timer, we should never get here in
	 * the manual case.
	 */
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
		int pipe = intel_crtc->pipe;
		int dpll_reg = DPLL(pipe);
		int dpll;

		DRM_DEBUG_DRIVER("downclocking LVDS\n");

		/* DPLL writes require the panel power registers unlocked. */
		assert_panel_unlocked(dev_priv, pipe);

		dpll = I915_READ(dpll_reg);
		dpll |= DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);
		dpll = I915_READ(dpll_reg);
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
	}

}
2327 Serge 7548
 
3031 serge 7549
void intel_mark_busy(struct drm_device *dev)
7550
{
4104 Serge 7551
	struct drm_i915_private *dev_priv = dev->dev_private;
7552
 
7553
	hsw_package_c8_gpu_busy(dev_priv);
7554
	i915_update_gfx_val(dev_priv);
3031 serge 7555
}
2327 Serge 7556
 
3031 serge 7557
void intel_mark_idle(struct drm_device *dev)
7558
{
4104 Serge 7559
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 7560
	struct drm_crtc *crtc;
2327 Serge 7561
 
4104 Serge 7562
	hsw_package_c8_gpu_idle(dev_priv);
7563
 
3031 serge 7564
	if (!i915_powersave)
7565
		return;
2327 Serge 7566
 
3031 serge 7567
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7568
		if (!crtc->fb)
7569
			continue;
2327 Serge 7570
 
3480 Serge 7571
		intel_decrease_pllclock(crtc);
3031 serge 7572
	}
7573
}
2327 Serge 7574
 
4104 Serge 7575
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
7576
			struct intel_ring_buffer *ring)
3031 serge 7577
{
7578
	struct drm_device *dev = obj->base.dev;
7579
	struct drm_crtc *crtc;
2327 Serge 7580
 
3031 serge 7581
	if (!i915_powersave)
7582
		return;
2327 Serge 7583
 
3031 serge 7584
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7585
		if (!crtc->fb)
7586
			continue;
2327 Serge 7587
 
4104 Serge 7588
		if (to_intel_framebuffer(crtc->fb)->obj != obj)
7589
			continue;
7590
 
3480 Serge 7591
			intel_increase_pllclock(crtc);
4104 Serge 7592
		if (ring && intel_fbc_enabled(dev))
7593
			ring->fbc_dirty = true;
3031 serge 7594
	}
7595
}
2327 Serge 7596
 
2330 Serge 7597
/*
 * Destroy an intel_crtc: cancel any in-flight page-flip work (detached
 * under the event lock so the irq path can't race), then release the
 * DRM core state and the crtc itself.
 */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_unpin_work *work;
	unsigned long flags;

	/* Steal the pending unpin work under the event lock... */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/* ...then wait for it outside the lock and free it. */
	if (work) {
		cancel_work_sync(&work->work);
		kfree(work);
	}

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}
2327 Serge 7618
 
3031 serge 7619
#if 0
7620
/*
 * Deferred completion of a page flip (currently compiled out via
 * "#if 0" in this port): unpin the old framebuffer, drop the object
 * references taken when the flip was queued, and refresh FBC state.
 * Runs from the driver workqueue, hence may take struct_mutex.
 */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_unpin_work *work =
		container_of(__work, struct intel_unpin_work, work);
	struct drm_device *dev = work->crtc->dev;

	mutex_lock(&dev->struct_mutex);
	intel_unpin_fb_obj(work->old_fb_obj);
	drm_gem_object_unreference(&work->pending_flip_obj->base);
	drm_gem_object_unreference(&work->old_fb_obj->base);

	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	/* Balance the count bumped when this flip was queued. */
	BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
	atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);

	kfree(work);
}
2327 Serge 7639
 
3031 serge 7640
/*
 * Complete a page flip from the vblank interrupt path: deliver the
 * userspace event, drop the vblank reference, and hand the unpin work
 * off to the workqueue.  Flips that have not yet reached
 * INTEL_FLIP_COMPLETE are ignored (early/spurious vblank irqs).
 */
static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	/* Ensure we don't miss a work->pending update ... */
	smp_rmb();

	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* and that the unpin work is consistent wrt ->pending. */
	smp_rmb();

	intel_crtc->unpin_work = NULL;

	/* Deliver the completion event userspace asked for, if any. */
	if (work->event)
		drm_send_vblank_event(dev, intel_crtc->pipe, work->event);

	drm_vblank_put(dev, intel_crtc->pipe);

	spin_unlock_irqrestore(&dev->event_lock, flags);

	wake_up_all(&dev_priv->pending_flip_queue);

	/* Unpinning happens later in process context (intel_unpin_work_fn). */
	queue_work(dev_priv->wq, &work->work);

	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
}
2327 Serge 7681
 
3031 serge 7682
void intel_finish_page_flip(struct drm_device *dev, int pipe)
7683
{
7684
	drm_i915_private_t *dev_priv = dev->dev_private;
7685
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2327 Serge 7686
 
3031 serge 7687
	do_intel_finish_page_flip(dev, crtc);
7688
}
2327 Serge 7689
 
3031 serge 7690
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
7691
{
7692
	drm_i915_private_t *dev_priv = dev->dev_private;
7693
	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
2327 Serge 7694
 
3031 serge 7695
	do_intel_finish_page_flip(dev, crtc);
7696
}
2327 Serge 7697
 
3031 serge 7698
/*
 * Advance a queued flip on @plane's CRTC from PENDING towards COMPLETE
 * when the flip-pending interrupt fires.  atomic_inc_not_zero() keeps
 * a not-yet-activated work item (pending == 0) untouched.
 */
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
	unsigned long flags;

	/* NB: An MMIO update of the plane base pointer will also
	 * generate a page-flip completion irq, i.e. every modeset
	 * is also accompanied by a spurious intel_prepare_page_flip().
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	if (intel_crtc->unpin_work)
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
2327 Serge 7714
 
3243 Serge 7715
/*
 * Publish the crtc's unpin_work as an active flip (INTEL_FLIP_PENDING)
 * so the irq handlers above will process it; the write barriers pair
 * with the smp_rmb()s in do_intel_finish_page_flip().
 */
inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
{
	/* Ensure that the work item is consistent when activating it ... */
	smp_wmb();
	atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
	/* and that it is marked active as soon as the irq could fire. */
	smp_wmb();
}
7723
 
3031 serge 7724
/*
 * Queue an MI_DISPLAY_FLIP on the render ring for gen2 hardware.
 * Pins and fences @obj first; on any failure the object is unpinned
 * and a negative errno is returned.
 */
static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		goto err_unpin;

	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	intel_ring_emit(ring, 0); /* aux display base address, unused */

	/* Must be marked active before the irq can observe the flip. */
	intel_mark_page_flip_active(intel_crtc);
	intel_ring_advance(ring);
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
2327 Serge 7768
 
3031 serge 7769
/*
 * Queue an MI_DISPLAY_FLIP_I915 on the render ring for gen3 hardware.
 * Same structure as the gen2 variant, differing only in the flip
 * opcode and the trailing NOOP in place of the aux base address.
 */
static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		goto err_unpin;

	/* Wait for any previous flip on this plane to complete first. */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	intel_ring_emit(ring, MI_NOOP);

	/* Must be marked active before the irq can observe the flip. */
	intel_mark_page_flip_active(intel_crtc);
	intel_ring_advance(ring);
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
2327 Serge 7810
 
3031 serge 7811
/*
 * Queue a page flip on the render ring for gen4 (i965-class) hardware.
 * Only the base address (with tiling bit) needs reprogramming; pitch
 * and tile offsets live in display registers that survive a flip.
 */
static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		goto err_unpin;

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring,
			(i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset) |
			obj->tiling_mode);

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	/* Must be marked active before the irq can observe the flip. */
	intel_mark_page_flip_active(intel_crtc);
	intel_ring_advance(ring);
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
2327 Serge 7859
 
3031 serge 7860
/*
 * Queue a page flip on the render ring for gen6 hardware.  Differs
 * from gen4 in that pitch and tiling mode are combined into a single
 * dword ahead of the base address.
 */
static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		goto err_unpin;

	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);

	/* Contrary to the suggestions in the documentation,
	 * "Enable Panel Fitter" does not seem to be required when page
	 * flipping with a non-native mode, and worse causes a normal
	 * modeset to fail.
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	/* Must be marked active before the irq can observe the flip. */
	intel_mark_page_flip_active(intel_crtc);
	intel_ring_advance(ring);
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
2327 Serge 7904
 
3031 serge 7905
/*
 * Queue a page flip for gen7 hardware.  Prefers the ring already
 * associated with @obj, falling back to the blitter ring on
 * Valleyview or when the object isn't on the render ring; on the
 * render ring extra dwords unmask DERRMR flip-done events first.
 */
static int intel_gen7_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_ring_buffer *ring;
	uint32_t plane_bit = 0;
	int len, ret;

	ring = obj->ring;
	if (IS_VALLEYVIEW(dev) || ring == NULL || ring->id != RCS)
		ring = &dev_priv->ring[BCS];

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	/* IVB encodes the target plane in the flip command itself. */
	switch(intel_crtc->plane) {
	case PLANE_A:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
		break;
	case PLANE_B:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
		break;
	case PLANE_C:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
		break;
	default:
		WARN_ONCE(1, "unknown plane in flip command\n");
		ret = -ENODEV;
		goto err_unpin;
	}

	/* RCS needs 6 extra dwords for the DERRMR dance below. */
	len = 4;
	if (ring->id == RCS)
		len += 6;

	ret = intel_ring_begin(ring, len);
	if (ret)
		goto err_unpin;

	/* Unmask the flip-done completion message. Note that the bspec says that
	 * we should do this for both the BCS and RCS, and that we must not unmask
	 * more than one flip event at any time (or ensure that one flip message
	 * can be sent by waiting for flip-done prior to queueing new flips).
	 * Experimentation says that BCS works despite DERRMR masking all
	 * flip-done completion events and that unmasking all planes at once
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
	 */
	if (ring->id == RCS) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, DERRMR);
		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
					DERRMR_PIPEB_PRI_FLIP_DONE |
					DERRMR_PIPEC_PRI_FLIP_DONE));
		intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1));
		intel_ring_emit(ring, DERRMR);
		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
	}

	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	intel_ring_emit(ring, (MI_NOOP));

	/* Must be marked active before the irq can observe the flip. */
	intel_mark_page_flip_active(intel_crtc);
	intel_ring_advance(ring);
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
2327 Serge 7983
 
3031 serge 7984
/*
 * Fallback flip handler for platforms with no MI_DISPLAY_FLIP
 * implementation: always reports that page flipping is unsupported.
 */
static int intel_default_queue_flip(struct drm_device *dev,
				    struct drm_crtc *crtc,
				    struct drm_framebuffer *fb,
				    struct drm_i915_gem_object *obj,
				    uint32_t flags)
{
	return -ENODEV;
}
2327 Serge 7992
 
3031 serge 7993
/*
 * Queue an asynchronous page flip of @crtc to framebuffer @fb.
 *
 * Validates that the flip is representable as an MI display flip (same
 * pixel format, and on gen4+ unchanged offsets/pitch), allocates and
 * publishes an intel_unpin_work item under dev->event_lock, then hands
 * the flip to the platform ->queue_flip hook. Only one flip may be
 * pending per crtc: a second request returns -EBUSY.
 *
 * On success crtc->fb points at @fb and 0 is returned; @event (may be
 * NULL) will be sent on flip completion. On failure all references,
 * the vblank reference and the work item are unwound and a negative
 * errno is returned.
 */
static int intel_crtc_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *old_fb = crtc->fb;
	struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	unsigned long flags;
	int ret;

	/* Can't change pixel format via MI display flips. */
	if (fb->pixel_format != crtc->fb->pixel_format)
		return -EINVAL;

	/*
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
	 * Note that pitch changes could also affect these register.
	 */
	if (INTEL_INFO(dev)->gen > 3 &&
	    (fb->offsets[0] != crtc->fb->offsets[0] ||
	     fb->pitches[0] != crtc->fb->pitches[0]))
		return -EINVAL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	work->event = event;
	work->crtc = crtc;
	work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
	INIT_WORK(&work->work, intel_unpin_work_fn);

	ret = drm_vblank_get(dev, intel_crtc->pipe);
	if (ret)
		goto free_work;

	/* We borrow the event spin lock for protecting unpin_work */
	spin_lock_irqsave(&dev->event_lock, flags);
	if (intel_crtc->unpin_work) {
		/* A flip is already pending on this crtc - reject. */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		kfree(work);
		drm_vblank_put(dev, intel_crtc->pipe);

		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		return -EBUSY;
	}
	intel_crtc->unpin_work = work;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/* Throttle: let queued unpin work drain before piling on more. */
	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
		flush_workqueue(dev_priv->wq);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto cleanup;

	/* Reference the objects for the scheduled work. */
	drm_gem_object_reference(&work->old_fb_obj->base);
	drm_gem_object_reference(&obj->base);

	crtc->fb = fb;

	work->pending_flip_obj = obj;

	work->enable_stall_check = true;

	atomic_inc(&intel_crtc->unpin_work_count);
	/* Snapshot the GPU reset counter so a wedged flip can be detected. */
	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);

	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, page_flip_flags);
	if (ret)
		goto cleanup_pending;

	intel_disable_fbc(dev);
	intel_mark_fb_busy(obj, NULL);
	mutex_unlock(&dev->struct_mutex);

	trace_i915_flip_request(intel_crtc->plane, obj);

	return 0;

cleanup_pending:
	/* Undo everything done under struct_mutex, restoring the old fb. */
	atomic_dec(&intel_crtc->unpin_work_count);
	crtc->fb = old_fb;
	drm_gem_object_unreference(&work->old_fb_obj->base);
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

cleanup:
	spin_lock_irqsave(&dev->event_lock, flags);
	intel_crtc->unpin_work = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	drm_vblank_put(dev, intel_crtc->pipe);
free_work:
	kfree(work);

	return ret;
}
8096
#endif
8097
 
8098
/*
 * CRTC helper vtable shared by all intel crtcs; only the atomic base
 * update and gamma LUT load hooks are provided here.
 */
static struct drm_crtc_helper_funcs intel_helper_funcs = {
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.load_lut = intel_crtc_load_lut,
};
8102
 
8103
static bool intel_encoder_crtc_ok(struct drm_encoder *encoder,
8104
				  struct drm_crtc *crtc)
8105
{
8106
	struct drm_device *dev;
8107
	struct drm_crtc *tmp;
8108
	int crtc_mask = 1;
8109
 
8110
	WARN(!crtc, "checking null crtc?\n");
8111
 
8112
	dev = crtc->dev;
8113
 
8114
	list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
8115
		if (tmp == crtc)
8116
			break;
8117
		crtc_mask <<= 1;
8118
	}
8119
 
8120
	if (encoder->possible_crtcs & crtc_mask)
8121
		return true;
8122
	return false;
8123
}
8124
 
8125
/**
 * intel_modeset_update_staged_output_state
 *
 * Updates the staged output configuration state, e.g. after we've read out the
 * current hw state.
 *
 * Copies the committed routing (connector->base.encoder, encoder->base.crtc)
 * into the staged pointers (connector->new_encoder, encoder->new_crtc) so
 * that a subsequent modeset starts from the current configuration.
 */
static void intel_modeset_update_staged_output_state(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		connector->new_encoder =
			to_intel_encoder(connector->base.encoder);
	}

	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		encoder->new_crtc =
			to_intel_crtc(encoder->base.crtc);
	}
}
8148
 
8149
/**
 * intel_modeset_commit_output_state
 *
 * This function copies the stage display pipe configuration to the real one.
 *
 * Inverse direction of intel_modeset_update_staged_output_state(): the
 * staged pointers (new_encoder / new_crtc) become the committed routing.
 */
static void intel_modeset_commit_output_state(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		connector->base.encoder = &connector->new_encoder->base;
	}

	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		encoder->base.crtc = &encoder->new_crtc->base;
	}
}
8169
 
4104 Serge 8170
/*
 * Clamp pipe_config->pipe_bpp to what the sink behind @connector can
 * accept: an EDID-reported bpc limits bpp to bpc*3, and screens that
 * report no bpc at all (no EDID 1.4 data) are limited to 24 bpp.
 * Only ever lowers pipe_bpp, never raises it.
 */
static void
connected_sink_compute_bpp(struct intel_connector * connector,
			   struct intel_crtc_config *pipe_config)
{
	int bpp = pipe_config->pipe_bpp;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
		connector->base.base.id,
		drm_get_connector_name(&connector->base));

	/* Don't use an invalid EDID bpc value */
	if (connector->base.display_info.bpc &&
	    connector->base.display_info.bpc * 3 < bpp) {
		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
			      bpp, connector->base.display_info.bpc*3);
		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
	}

	/* Clamp bpp to 8 on screens without EDID 1.4 */
	if (connector->base.display_info.bpc == 0 && bpp > 24) {
		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
			      bpp);
		pipe_config->pipe_bpp = 24;
	}
}
8195
 
3746 Serge 8196
/*
 * Derive a starting pipe_bpp from the framebuffer pixel format, then let
 * every connector staged on @crtc clamp it to its sink's limits.
 *
 * Returns the *plane* bpp (before sink clamping) so the caller can enable
 * dithering when pipe_bpp ends up lower, or -EINVAL for formats that are
 * unsupported on this hardware generation.
 */
static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
		    struct drm_framebuffer *fb,
		    struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_connector *connector;
	int bpp;

	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		bpp = 8*3; /* since we go through a colormap */
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		/* checked in intel_framebuffer_init already */
		if (WARN_ON(INTEL_INFO(dev)->gen > 3))
			return -EINVAL;
		/* fallthrough: 1555 shares the 18bpp minimum with 565 */
	case DRM_FORMAT_RGB565:
		bpp = 6*3; /* min is 18bpp */
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		/* checked in intel_framebuffer_init already */
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
			return -EINVAL;
		/* fallthrough: all 8888 variants are 24bpp */
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		bpp = 8*3;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		/* checked in intel_framebuffer_init already */
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
			return -EINVAL;
		bpp = 10*3;
		break;
	/* TODO: gen4+ supports 16 bpc floating point, too. */
	default:
		DRM_DEBUG_KMS("unsupported depth\n");
		return -EINVAL;
	}

	pipe_config->pipe_bpp = bpp;

	/* Clamp display bpp to EDID value */
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		/* Only consider connectors staged onto this crtc. */
		if (!connector->new_encoder ||
		    connector->new_encoder->new_crtc != crtc)
			continue;

		connected_sink_compute_bpp(connector, pipe_config);
	}

	return bpp;
}
8255
 
4104 Serge 8256
/*
 * Dump every tracked field of @pipe_config to the KMS debug log, tagged
 * with @context (e.g. "[hw state]" vs "[sw state]") so mismatching
 * configurations can be compared side by side.
 */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_config *pipe_config,
				   const char *context)
{
	DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
		      context, pipe_name(crtc->pipe));

	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
		      pipe_config->pipe_bpp, pipe_config->dither);
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_pch_encoder,
		      pipe_config->fdi_lanes,
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
		      pipe_config->fdi_m_n.tu);
	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->requested_mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
		      pipe_config->gmch_pfit.control,
		      pipe_config->gmch_pfit.pgm_ratios,
		      pipe_config->gmch_pfit.lvds_border_bits);
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
		      pipe_config->pch_pfit.pos,
		      pipe_config->pch_pfit.size,
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
}
8286
 
8287
static bool check_encoder_cloning(struct drm_crtc *crtc)
8288
{
8289
	int num_encoders = 0;
8290
	bool uncloneable_encoders = false;
8291
	struct intel_encoder *encoder;
8292
 
8293
	list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list,
8294
			    base.head) {
8295
		if (&encoder->new_crtc->base != crtc)
8296
			continue;
8297
 
8298
		num_encoders++;
8299
		if (!encoder->cloneable)
8300
			uncloneable_encoders = true;
8301
	}
8302
 
8303
	return !(num_encoders > 1 && uncloneable_encoders);
8304
}
8305
 
3746 Serge 8306
/*
 * Compute the full pipe configuration for @crtc driving @mode with the
 * scanout buffer @fb.
 *
 * Allocates a fresh intel_crtc_config (caller owns it and must kfree),
 * seeds defaults, then lets every staged encoder and finally the crtc
 * adjust it. If the crtc asks for a retry (bandwidth constraints) the
 * encoder pass is run exactly once more. Returns the config on success
 * or an ERR_PTR on failure.
 */
static struct intel_crtc_config *
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			    struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;
	struct intel_crtc_config *pipe_config;
	int plane_bpp, ret = -EINVAL;
	bool retry = true;

	if (!check_encoder_cloning(crtc)) {
		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
		return ERR_PTR(-EINVAL);
	}

	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
	if (!pipe_config)
		return ERR_PTR(-ENOMEM);

	drm_mode_copy(&pipe_config->adjusted_mode, mode);
	drm_mode_copy(&pipe_config->requested_mode, mode);
	/* Default: the cpu transcoder matches the pipe number. */
	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	/* Compute a starting value for pipe_config->pipe_bpp taking the source
	 * plane pixel format and any sink constraints into account. Returns the
	 * source plane bpp so that dithering can be selected on mismatches
	 * after encoders and crtc also have had their say. */
	plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					      fb, pipe_config);
	if (plane_bpp < 0)
		goto fail;

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, 0);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {

		if (&encoder->new_crtc->base != crtc)
			continue;

		if (!(encoder->compute_config(encoder, pipe_config))) {
			DRM_DEBUG_KMS("Encoder config failure\n");
			goto fail;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->adjusted_mode.clock;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		goto fail;
	}

	/* A positive RETRY means rerun the encoder pass - but only once. */
	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
			ret = -EINVAL;
			goto fail;
		}

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dither whenever the sink-clamped pipe bpp differs from the plane's. */
	pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
	DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
		      plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	return pipe_config;
fail:
	kfree(pipe_config);
	return ERR_PTR(ret);
}
2327 Serge 8409
 
3031 serge 8410
/* Computes which crtcs are affected and sets the relevant bits in the mask. For
 * simplicity we use the crtc's pipe number (because it's easier to obtain).
 *
 * On return each mask has one bit per pipe:
 *   *prepare_pipes - pipes whose output routing changes (plus @crtc itself
 *                    if it is enabled);
 *   *modeset_pipes - pipes needing a full modeset (subset of prepare);
 *   *disable_pipes - currently-enabled pipes that no staged encoder uses.
 */
static void
intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
			     unsigned *prepare_pipes, unsigned *disable_pipes)
{
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_crtc *tmp_crtc;

	*disable_pipes = *modeset_pipes = *prepare_pipes = 0;

	/* Check which crtcs have changed outputs connected to them, these need
	 * to be part of the prepare_pipes mask. We don't (yet) support global
	 * modeset across multiple crtcs, so modeset_pipes will only have one
	 * bit set at most. */
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		if (connector->base.encoder == &connector->new_encoder->base)
			continue;

		if (connector->base.encoder) {
			tmp_crtc = connector->base.encoder->crtc;

			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
		}

		if (connector->new_encoder)
			*prepare_pipes |=
				1 << connector->new_encoder->new_crtc->pipe;
	}

	/* Same for encoders whose crtc routing changed. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		if (encoder->base.crtc == &encoder->new_crtc->base)
			continue;

		if (encoder->base.crtc) {
			tmp_crtc = encoder->base.crtc;

			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
		}

		if (encoder->new_crtc)
			*prepare_pipes |= 1 << encoder->new_crtc->pipe;
	}

	/* Check for any pipes that will be fully disabled ... */
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
			    base.head) {
		bool used = false;

		/* Don't try to disable disabled crtcs. */
		if (!intel_crtc->base.enabled)
			continue;

		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
				    base.head) {
			if (encoder->new_crtc == intel_crtc)
				used = true;
		}

		if (!used)
			*disable_pipes |= 1 << intel_crtc->pipe;
	}


	/* set_mode is also used to update properties on life display pipes. */
	intel_crtc = to_intel_crtc(crtc);
	if (crtc->enabled)
		*prepare_pipes |= 1 << intel_crtc->pipe;

	/*
	 * For simplicity do a full modeset on any pipe where the output routing
	 * changed. We could be more clever, but that would require us to be
	 * more careful with calling the relevant encoder->mode_set functions.
	 */
	if (*prepare_pipes)
		*modeset_pipes = *prepare_pipes;

	/* ... and mask these out. */
	*modeset_pipes &= ~(*disable_pipes);
	*prepare_pipes &= ~(*disable_pipes);

	/*
	 * HACK: We don't (yet) fully support global modesets. intel_set_config
	 * obies this rule, but the modeset restore mode of
	 * intel_modeset_setup_hw_state does not.
	 */
	*modeset_pipes &= 1 << intel_crtc->pipe;
	*prepare_pipes &= 1 << intel_crtc->pipe;

	DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
		      *modeset_pipes, *prepare_pipes, *disable_pipes);
}
2327 Serge 8507
 
3031 serge 8508
static bool intel_crtc_in_use(struct drm_crtc *crtc)
2330 Serge 8509
{
3031 serge 8510
	struct drm_encoder *encoder;
2330 Serge 8511
	struct drm_device *dev = crtc->dev;
2327 Serge 8512
 
3031 serge 8513
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
8514
		if (encoder->crtc == crtc)
8515
			return true;
8516
 
8517
	return false;
8518
}
8519
 
8520
/*
 * Commit the staged output state and refresh all derived software state
 * after a modeset touching the pipes in @prepare_pipes: encoders on those
 * pipes get connectors_active cleared, crtc->base.enabled is recomputed
 * from actual encoder routing, and connectors on touched pipes are forced
 * to DPMS_ON (their encoder marked active again).
 */
static void
intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
{
	struct intel_encoder *intel_encoder;
	struct intel_crtc *intel_crtc;
	struct drm_connector *connector;

	list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
			    base.head) {
		if (!intel_encoder->base.crtc)
			continue;

		intel_crtc = to_intel_crtc(intel_encoder->base.crtc);

		if (prepare_pipes & (1 << intel_crtc->pipe))
			intel_encoder->connectors_active = false;
	}

	intel_modeset_commit_output_state(dev);

	/* Update computed state. */
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
			    base.head) {
		intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
	}

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		if (!connector->encoder || !connector->encoder->crtc)
			continue;

		intel_crtc = to_intel_crtc(connector->encoder->crtc);

		if (prepare_pipes & (1 << intel_crtc->pipe)) {
			struct drm_property *dpms_property =
				dev->mode_config.dpms_property;

			/* A fresh modeset leaves the output on. */
			connector->dpms = DRM_MODE_DPMS_ON;
			drm_object_property_set_value(&connector->base,
							 dpms_property,
							 DRM_MODE_DPMS_ON);

			intel_encoder = to_intel_encoder(connector->encoder);
			intel_encoder->connectors_active = true;
		}
	}

}
8567
 
4104 Serge 8568
static bool intel_fuzzy_clock_check(struct intel_crtc_config *cur,
8569
				    struct intel_crtc_config *new)
8570
{
8571
	int clock1, clock2, diff;
8572
 
8573
	clock1 = cur->adjusted_mode.clock;
8574
	clock2 = new->adjusted_mode.clock;
8575
 
8576
	if (clock1 == clock2)
8577
		return true;
8578
 
8579
	if (!clock1 || !clock2)
8580
		return false;
8581
 
8582
	diff = abs(clock1 - clock2);
8583
 
8584
	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
8585
		return true;
8586
 
8587
	return false;
8588
}
8589
 
3031 serge 8590
/*
 * Iterate @intel_crtc over every crtc on @dev whose pipe bit is set in
 * @mask (bit index == crtc->pipe).
 */
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
	list_for_each_entry((intel_crtc), \
			    &(dev)->mode_config.crtc_list, \
			    base.head) \
		if (mask & (1 <<(intel_crtc)->pipe))
3031 serge 8595
 
3746 Serge 8596
/*
 * Compare the software-tracked pipe configuration against one read back
 * from the hardware. Every mismatching field is reported via DRM_ERROR
 * and fails the compare. Returns true iff the configs agree.
 */
static bool
intel_pipe_config_compare(struct drm_device *dev,
			  struct intel_crtc_config *current_config,
			  struct intel_crtc_config *pipe_config)
{
/* Compare one field, logging it in hex on mismatch. */
#define PIPE_CONF_CHECK_X(name)	\
	if (current_config->name != pipe_config->name) { \
		DRM_ERROR("mismatch in " #name " " \
			  "(expected 0x%08x, found 0x%08x)\n", \
			  current_config->name, \
			  pipe_config->name); \
		return false; \
	}

/* Compare one field, logging it in decimal on mismatch. */
#define PIPE_CONF_CHECK_I(name)	\
	if (current_config->name != pipe_config->name) { \
		DRM_ERROR("mismatch in " #name " " \
			  "(expected %i, found %i)\n", \
			  current_config->name, \
			  pipe_config->name); \
		return false; \
	}

/* Compare only the bits of one field selected by @mask. */
#define PIPE_CONF_CHECK_FLAGS(name, mask)	\
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		DRM_ERROR("mismatch in " #name "(" #mask ") "	   \
			  "(expected %i, found %i)\n", \
			  current_config->name & (mask), \
			  pipe_config->name & (mask)); \
		return false; \
	}

/* True when either config carries the given quirk flag. */
#define PIPE_CONF_QUIRK(quirk)	\
	((current_config->quirks | pipe_config->quirks) & (quirk))

	PIPE_CONF_CHECK_I(cpu_transcoder);

	PIPE_CONF_CHECK_I(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
	PIPE_CONF_CHECK_I(fdi_m_n.link_m);
	PIPE_CONF_CHECK_I(fdi_m_n.link_n);
	PIPE_CONF_CHECK_I(fdi_m_n.tu);

	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_end);
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_start);
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_end);

	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vdisplay);
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vtotal);
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_start);
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_end);
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);

	PIPE_CONF_CHECK_I(pixel_multiplier);

	PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
			      DRM_MODE_FLAG_INTERLACE);

	/* Sync polarity flags are skipped for configs with the quirk set. */
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
				      DRM_MODE_FLAG_PHSYNC);
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
				      DRM_MODE_FLAG_NHSYNC);
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
				      DRM_MODE_FLAG_PVSYNC);
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
				      DRM_MODE_FLAG_NVSYNC);
	}

	PIPE_CONF_CHECK_I(requested_mode.hdisplay);
	PIPE_CONF_CHECK_I(requested_mode.vdisplay);

	PIPE_CONF_CHECK_I(gmch_pfit.control);
	/* pfit ratios are autocomputed by the hw on gen4+ */
	if (INTEL_INFO(dev)->gen < 4)
		PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
	PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
	PIPE_CONF_CHECK_I(pch_pfit.enabled);
	if (current_config->pch_pfit.enabled) {
		PIPE_CONF_CHECK_I(pch_pfit.pos);
		PIPE_CONF_CHECK_I(pch_pfit.size);
	}

	PIPE_CONF_CHECK_I(ips_enabled);

	PIPE_CONF_CHECK_I(shared_dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);

	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
		PIPE_CONF_CHECK_I(pipe_bpp);

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_QUIRK

	/* Clock readout on Haswell is not compared; elsewhere use ~5% fuzz. */
	if (!IS_HASWELL(dev)) {
		if (!intel_fuzzy_clock_check(current_config, pipe_config)) {
			DRM_ERROR("mismatch in clock (expected %d, found %d)\n",
				  current_config->adjusted_mode.clock,
				  pipe_config->adjusted_mode.clock);
			return false;
		}
	}

	return true;
}
8712
 
4104 Serge 8713
/*
 * Cross-check every connector's software state: runs the per-connector
 * hw-state check and asserts the staged encoder matches the committed one.
 */
static void
check_connector_state(struct drm_device *dev)
{
	struct intel_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		/* This also checks the encoder/connector hw state with the
		 * ->get_hw_state callbacks. */
		intel_connector_check_state(connector);

		WARN(&connector->new_encoder->base != connector->base.encoder,
		     "connector's staged encoder doesn't match current encoder\n");
	}
}
3031 serge 8728
 
4104 Serge 8729
/*
 * Cross-check every encoder's software state against derived and hardware
 * state: staged vs committed crtc, connectors_active vs attached connector
 * dpms, and the pipe reported by ->get_hw_state. All violations are
 * reported through WARN.
 */
static void
check_encoder_state(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		bool enabled = false;
		bool active = false;
		enum pipe pipe, tracked_pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      drm_get_encoder_name(&encoder->base));

		WARN(&encoder->new_crtc->base != encoder->base.crtc,
		     "encoder's stage crtc doesn't match current crtc\n");
		WARN(encoder->connectors_active && !encoder->base.crtc,
		     "encoder's active_connectors set, but no crtc\n");

		/* Derive enabled/active from the connectors routed to us. */
		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    base.head) {
			if (connector->base.encoder != &encoder->base)
				continue;
			enabled = true;
			if (connector->base.dpms != DRM_MODE_DPMS_OFF)
				active = true;
		}
		WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);
		WARN(active && !encoder->base.crtc,
		     "active encoder with no crtc\n");

		WARN(encoder->connectors_active != active,
		     "encoder's computed active state doesn't match tracked active state "
		     "(expected %i, found %i)\n", active, encoder->connectors_active);

		/* Now compare against what the hardware reports. */
		active = encoder->get_hw_state(encoder, &pipe);
		WARN(active != encoder->connectors_active,
		     "encoder's hw state doesn't match sw tracking "
		     "(expected %i, found %i)\n",
		     encoder->connectors_active, active);

		if (!encoder->base.crtc)
			continue;

		tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
		WARN(active && pipe != tracked_pipe,
		     "active encoder's pipe doesn't match"
		     "(expected %i, found %i)\n",
		     tracked_pipe, pipe);

	}
}
3031 serge 8786
 
4104 Serge 8787
/*
 * Cross-check every crtc's software state against encoder routing and the
 * hardware: recomputes enabled/active from the encoders, reads the pipe
 * config back via ->get_pipe_config / encoder ->get_config, and compares
 * it with the tracked config via intel_pipe_config_compare(), dumping
 * both configs on mismatch. Violations are reported through WARN.
 */
static void
check_crtc_state(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_crtc_config pipe_config;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
			    base.head) {
		bool enabled = false;
		bool active = false;

		memset(&pipe_config, 0, sizeof(pipe_config));

		DRM_DEBUG_KMS("[CRTC:%d]\n",
			      crtc->base.base.id);

		WARN(crtc->active && !crtc->base.enabled,
		     "active crtc, but not enabled in sw tracking\n");

		/* Derive enabled/active from the encoders routed to this crtc. */
		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
				    base.head) {
			if (encoder->base.crtc != &crtc->base)
				continue;
			enabled = true;
			if (encoder->connectors_active)
				active = true;
		}

		WARN(active != crtc->active,
		     "crtc's computed active state doesn't match tracked active state "
		     "(expected %i, found %i)\n", active, crtc->active);
		WARN(enabled != crtc->base.enabled,
		     "crtc's computed enabled state doesn't match tracked enabled state "
		     "(expected %i, found %i)\n", enabled, crtc->base.enabled);

		active = dev_priv->display.get_pipe_config(crtc,
							   &pipe_config);

		/* hw state is inconsistent with the pipe A quirk */
		if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
			active = crtc->active;

		/* Let active encoders fill in their part of the hw config. */
		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
				    base.head) {
			enum pipe pipe;
			if (encoder->base.crtc != &crtc->base)
				continue;
			if (encoder->get_config &&
			    encoder->get_hw_state(encoder, &pipe))
				encoder->get_config(encoder, &pipe_config);
		}

		if (dev_priv->display.get_clock)
			dev_priv->display.get_clock(crtc, &pipe_config);

		WARN(crtc->active != active,
		     "crtc active state doesn't match with hw state "
		     "(expected %i, found %i)\n", crtc->active, active);

		if (active &&
		    !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) {
			WARN(1, "pipe state doesn't match!\n");
			intel_dump_pipe_config(crtc, &pipe_config,
					       "[hw state]");
			intel_dump_pipe_config(crtc, &crtc->config,
					       "[sw state]");
		}
	}
}
8858
 
4104 Serge 8859
/*
 * Cross-check the software tracking of every shared (PCH) DPLL against the
 * actual hardware state. Each inconsistency triggers a WARN; nothing is
 * repaired here — this is purely a modeset state validator.
 */
static void
check_shared_dpll_state(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	struct intel_dpll_hw_state dpll_hw_state;
	int i;

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
		int enabled_crtcs = 0, active_crtcs = 0;
		bool active;

		/* Fresh snapshot for each PLL so stale fields can't leak
		 * into the memcmp() below. */
		memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

		DRM_DEBUG_KMS("%s\n", pll->name);

		active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);

		/* Internal sw-tracking invariants: active users are a subset
		 * of refcount holders, and on/active must agree. */
		WARN(pll->active > pll->refcount,
		     "more active pll users than references: %i vs %i\n",
		     pll->active, pll->refcount);
		WARN(pll->active && !pll->on,
		     "pll in active use but not on in sw tracking\n");
		WARN(pll->on && !pll->active,
		     "pll in on but not on in use in sw tracking\n");
		WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);

		/* Recount users from the crtc list and compare against the
		 * pll's own bookkeeping. */
		list_for_each_entry(crtc, &dev->mode_config.crtc_list,
				    base.head) {
			if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
				enabled_crtcs++;
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
				active_crtcs++;
		}
		WARN(pll->active != active_crtcs,
		     "pll active crtcs mismatch (expected %i, found %i)\n",
		     pll->active, active_crtcs);
		WARN(pll->refcount != enabled_crtcs,
		     "pll enabled crtcs mismatch (expected %i, found %i)\n",
		     pll->refcount, enabled_crtcs);

		/* Finally compare the cached register values against what the
		 * hardware reported (only meaningful while the pll is on). */
		WARN(pll->on && memcmp(&pll->hw_state, &dpll_hw_state,
				       sizeof(dpll_hw_state)),
		     "pll hw state mismatch\n");
	}
}
8908
 
8909
/*
 * Run the full set of sw-vs-hw consistency validators after a modeset:
 * connectors, encoders, crtcs and shared DPLLs, in dependency order.
 */
void
intel_modeset_check_state(struct drm_device *dev)
{
	check_connector_state(dev);
	check_encoder_state(dev);
	check_crtc_state(dev);
	check_shared_dpll_state(dev);
}
8917
 
3746 Serge 8918
/*
 * Core modeset implementation: disable affected pipes, compute and commit
 * the new pipe config, then re-enable everything. Returns 0 on success or
 * a negative errno; on failure the crtc's mode/hwmode are restored from
 * the saved copies (hardware state may still be partially changed).
 */
static int __intel_set_mode(struct drm_crtc *crtc,
		    struct drm_display_mode *mode,
		    int x, int y, struct drm_framebuffer *fb)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_display_mode *saved_mode, *saved_hwmode;
	struct intel_crtc_config *pipe_config = NULL;
	struct intel_crtc *intel_crtc;
	unsigned disable_pipes, prepare_pipes, modeset_pipes;
	int ret = 0;

	/* One allocation holds both saved modes; saved_hwmode is the
	 * second element. */
	saved_mode = kmalloc(2 * sizeof(*saved_mode), GFP_KERNEL);
	if (!saved_mode)
		return -ENOMEM;
	saved_hwmode = saved_mode + 1;

	intel_modeset_affected_pipes(crtc, &modeset_pipes,
				     &prepare_pipes, &disable_pipes);

	*saved_hwmode = crtc->hwmode;
	*saved_mode = crtc->mode;

	/* Hack: Because we don't (yet) support global modeset on multiple
	 * crtcs, we don't keep track of the new mode for more than one crtc.
	 * Hence simply check whether any bit is set in modeset_pipes in all the
	 * pieces of code that are not yet converted to deal with mutliple crtcs
	 * changing their mode at the same time. */
	if (modeset_pipes) {
		pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
		if (IS_ERR(pipe_config)) {
			ret = PTR_ERR(pipe_config);
			pipe_config = NULL;

			goto out;
		}
		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
				       "[modeset]");
	}

	for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
		intel_crtc_disable(&intel_crtc->base);

	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
		if (intel_crtc->base.enabled)
			dev_priv->display.crtc_disable(&intel_crtc->base);
	}

	/* crtc->mode is already used by the ->mode_set callbacks, hence we need
	 * to set it here already despite that we pass it down the callchain.
	 */
	if (modeset_pipes) {
		crtc->mode = *mode;
		/* mode_set/enable/disable functions rely on a correct pipe
		 * config. */
		to_intel_crtc(crtc)->config = *pipe_config;
	}

	/* Only after disabling all output pipelines that will be changed can we
	 * update the the output configuration. */
	intel_modeset_update_state(dev, prepare_pipes);

	if (dev_priv->display.modeset_global_resources)
		dev_priv->display.modeset_global_resources(dev);

	/* Set up the DPLL and any encoders state that needs to adjust or depend
	 * on the DPLL.
	 */
	for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
		ret = intel_crtc_mode_set(&intel_crtc->base,
					   x, y, fb);
		if (ret)
		    goto done;
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
		dev_priv->display.crtc_enable(&intel_crtc->base);

	if (modeset_pipes) {
		/* Store real post-adjustment hardware mode. */
		crtc->hwmode = pipe_config->adjusted_mode;

		/* Calculate and store various constants which
		 * are later needed by vblank and swap-completion
		 * timestamping. They are derived from true hwmode.
		 */
		drm_calc_timestamping_constants(crtc);
	}

	/* FIXME: add subpixel order */
done:
	/* On error, roll back the sw-visible mode bookkeeping. */
	if (ret && crtc->enabled) {
		crtc->hwmode = *saved_hwmode;
		crtc->mode = *saved_mode;
	}

out:
	kfree(pipe_config);
	kfree(saved_mode);
	return ret;
}
2327 Serge 9020
 
4104 Serge 9021
static int intel_set_mode(struct drm_crtc *crtc,
3746 Serge 9022
		     struct drm_display_mode *mode,
9023
		     int x, int y, struct drm_framebuffer *fb)
9024
{
9025
	int ret;
9026
 
9027
	ret = __intel_set_mode(crtc, mode, x, y, fb);
9028
 
9029
	if (ret == 0)
9030
		intel_modeset_check_state(crtc->dev);
9031
 
9032
	return ret;
9033
}
9034
 
3480 Serge 9035
void intel_crtc_restore_mode(struct drm_crtc *crtc)
9036
{
9037
	intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb);
9038
}
9039
 
3031 serge 9040
#undef for_each_intel_crtc_masked
2327 Serge 9041
 
3031 serge 9042
static void intel_set_config_free(struct intel_set_config *config)
9043
{
9044
	if (!config)
9045
		return;
9046
 
9047
	kfree(config->save_connector_encoders);
9048
	kfree(config->save_encoder_crtcs);
9049
	kfree(config);
9050
}
9051
 
9052
static int intel_set_config_save_state(struct drm_device *dev,
9053
				       struct intel_set_config *config)
9054
{
9055
	struct drm_encoder *encoder;
9056
	struct drm_connector *connector;
9057
	int count;
9058
 
9059
	config->save_encoder_crtcs =
9060
		kcalloc(dev->mode_config.num_encoder,
9061
			sizeof(struct drm_crtc *), GFP_KERNEL);
9062
	if (!config->save_encoder_crtcs)
9063
		return -ENOMEM;
9064
 
9065
	config->save_connector_encoders =
9066
		kcalloc(dev->mode_config.num_connector,
9067
			sizeof(struct drm_encoder *), GFP_KERNEL);
9068
	if (!config->save_connector_encoders)
9069
		return -ENOMEM;
9070
 
9071
	/* Copy data. Note that driver private data is not affected.
9072
	 * Should anything bad happen only the expected state is
9073
	 * restored, not the drivers personal bookkeeping.
9074
	 */
9075
	count = 0;
9076
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
9077
		config->save_encoder_crtcs[count++] = encoder->crtc;
9078
	}
9079
 
9080
	count = 0;
9081
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
9082
		config->save_connector_encoders[count++] = connector->encoder;
9083
	}
9084
 
9085
	return 0;
9086
}
9087
 
9088
static void intel_set_config_restore_state(struct drm_device *dev,
9089
					   struct intel_set_config *config)
9090
{
9091
	struct intel_encoder *encoder;
9092
	struct intel_connector *connector;
9093
	int count;
9094
 
9095
	count = 0;
9096
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
9097
		encoder->new_crtc =
9098
			to_intel_crtc(config->save_encoder_crtcs[count++]);
9099
	}
9100
 
9101
	count = 0;
9102
	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
9103
		connector->new_encoder =
9104
			to_intel_encoder(config->save_connector_encoders[count++]);
9105
	}
9106
}
9107
 
3746 Serge 9108
static bool
4104 Serge 9109
is_crtc_connector_off(struct drm_mode_set *set)
3746 Serge 9110
{
9111
	int i;
9112
 
4104 Serge 9113
	if (set->num_connectors == 0)
9114
		return false;
9115
 
9116
	if (WARN_ON(set->connectors == NULL))
9117
		return false;
9118
 
9119
	for (i = 0; i < set->num_connectors; i++)
9120
		if (set->connectors[i]->encoder &&
9121
		    set->connectors[i]->encoder->crtc == set->crtc &&
9122
		    set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
3746 Serge 9123
			return true;
9124
 
9125
	return false;
9126
}
9127
 
3031 serge 9128
static void
9129
intel_set_config_compute_mode_changes(struct drm_mode_set *set,
9130
				      struct intel_set_config *config)
9131
{
9132
 
9133
	/* We should be able to check here if the fb has the same properties
9134
	 * and then just flip_or_move it */
4104 Serge 9135
	if (is_crtc_connector_off(set)) {
3746 Serge 9136
			config->mode_changed = true;
9137
	} else if (set->crtc->fb != set->fb) {
3031 serge 9138
		/* If we have no fb then treat it as a full mode set */
9139
		if (set->crtc->fb == NULL) {
4104 Serge 9140
			struct intel_crtc *intel_crtc =
9141
				to_intel_crtc(set->crtc);
9142
 
9143
			if (intel_crtc->active && i915_fastboot) {
9144
				DRM_DEBUG_KMS("crtc has no fb, will flip\n");
9145
				config->fb_changed = true;
9146
			} else {
9147
				DRM_DEBUG_KMS("inactive crtc, full mode set\n");
3031 serge 9148
			config->mode_changed = true;
4104 Serge 9149
			}
3031 serge 9150
		} else if (set->fb == NULL) {
9151
			config->mode_changed = true;
3746 Serge 9152
		} else if (set->fb->pixel_format !=
9153
			   set->crtc->fb->pixel_format) {
3031 serge 9154
			config->mode_changed = true;
3746 Serge 9155
		} else {
3031 serge 9156
			config->fb_changed = true;
9157
	}
3746 Serge 9158
	}
3031 serge 9159
 
9160
	if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
9161
		config->fb_changed = true;
9162
 
9163
	if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
9164
		DRM_DEBUG_KMS("modes are different, full mode set\n");
9165
		drm_mode_debug_printmodeline(&set->crtc->mode);
9166
		drm_mode_debug_printmodeline(set->mode);
9167
		config->mode_changed = true;
9168
	}
4104 Serge 9169
 
9170
	DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
9171
			set->crtc->base.id, config->mode_changed, config->fb_changed);
3031 serge 9172
}
9173
 
9174
/*
 * Stage the requested output routing into the new_encoder/new_crtc
 * pointers on connectors and encoders, without touching hardware.
 * Sets config->mode_changed when any link changes. Returns 0 or -EINVAL
 * when an encoder cannot drive the requested crtc.
 */
static int
intel_modeset_stage_output_state(struct drm_device *dev,
				 struct drm_mode_set *set,
				 struct intel_set_config *config)
{
	struct drm_crtc *new_crtc;
	struct intel_connector *connector;
	struct intel_encoder *encoder;
	int ro;

	/* The upper layers ensure that we either disable a crtc or have a list
	 * of connectors. For paranoia, double-check this. */
	WARN_ON(!set->fb && (set->num_connectors != 0));
	WARN_ON(set->fb && (set->num_connectors == 0));

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		/* Otherwise traverse passed in connector list and get encoders
		 * for them. */
		for (ro = 0; ro < set->num_connectors; ro++) {
			if (set->connectors[ro] == &connector->base) {
				connector->new_encoder = connector->encoder;
				break;
			}
		}

		/* If we disable the crtc, disable all its connectors. Also, if
		 * the connector is on the changing crtc but not on the new
		 * connector list, disable it. */
		if ((!set->fb || ro == set->num_connectors) &&
		    connector->base.encoder &&
		    connector->base.encoder->crtc == set->crtc) {
			connector->new_encoder = NULL;

			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
				connector->base.base.id,
				drm_get_connector_name(&connector->base));
		}


		if (&connector->new_encoder->base != connector->base.encoder) {
			DRM_DEBUG_KMS("encoder changed, full mode switch\n");
			config->mode_changed = true;
		}
	}
	/* connector->new_encoder is now updated for all connectors. */

	/* Update crtc of enabled connectors. */
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		if (!connector->new_encoder)
			continue;

		new_crtc = connector->new_encoder->base.crtc;

		/* A connector explicitly listed in the set is retargeted to
		 * the set's crtc. */
		for (ro = 0; ro < set->num_connectors; ro++) {
			if (set->connectors[ro] == &connector->base)
				new_crtc = set->crtc;
		}

		/* Make sure the new CRTC will work with the encoder */
		if (!intel_encoder_crtc_ok(&connector->new_encoder->base,
					   new_crtc)) {
			return -EINVAL;
		}
		/* NOTE(review): writes through connector->encoder rather than
		 * connector->new_encoder — if they can differ here this
		 * updates the wrong encoder; confirm against callers. */
		connector->encoder->new_crtc = to_intel_crtc(new_crtc);

		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
			connector->base.base.id,
			drm_get_connector_name(&connector->base),
			new_crtc->base.id);
	}

	/* Check for any encoders that needs to be disabled. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		list_for_each_entry(connector,
				    &dev->mode_config.connector_list,
				    base.head) {
			if (connector->new_encoder == encoder) {
				WARN_ON(!connector->new_encoder->new_crtc);

				goto next_encoder;
			}
		}
		/* No connector references this encoder any more: disable it. */
		encoder->new_crtc = NULL;
next_encoder:
		/* Only now check for crtc changes so we don't miss encoders
		 * that will be disabled. */
		if (&encoder->new_crtc->base != encoder->base.crtc) {
			DRM_DEBUG_KMS("crtc changed, full mode switch\n");
			config->mode_changed = true;
		}
	}
	/* Now we've also updated encoder->new_crtc for all encoders. */

	return 0;
}
9272
 
9273
/*
 * drm_crtc_funcs.set_config implementation: save current routing, classify
 * the request, stage the new output state, then either do a full modeset
 * or just an fb base update. On any failure the saved configuration is
 * restored (including a restore-modeset if one was attempted).
 */
static int intel_crtc_set_config(struct drm_mode_set *set)
{
	struct drm_device *dev;
	struct drm_mode_set save_set;
	struct intel_set_config *config;
	int ret;

	BUG_ON(!set);
	BUG_ON(!set->crtc);
	BUG_ON(!set->crtc->helper_private);

	/* Enforce sane interface api - has been abused by the fb helper. */
	BUG_ON(!set->mode && set->fb);
	BUG_ON(set->fb && set->num_connectors == 0);

	if (set->fb) {
		DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
				set->crtc->base.id, set->fb->base.id,
				(int)set->num_connectors, set->x, set->y);
	} else {
		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
	}

	dev = set->crtc->dev;

	ret = -ENOMEM;
	config = kzalloc(sizeof(*config), GFP_KERNEL);
	if (!config)
		goto out_config;

	ret = intel_set_config_save_state(dev, config);
	if (ret)
		goto out_config;

	/* Remember the pre-modeset configuration for rollback. */
	save_set.crtc = set->crtc;
	save_set.mode = &set->crtc->mode;
	save_set.x = set->crtc->x;
	save_set.y = set->crtc->y;
	save_set.fb = set->crtc->fb;

	/* Compute whether we need a full modeset, only an fb base update or no
	 * change at all. In the future we might also check whether only the
	 * mode changed, e.g. for LVDS where we only change the panel fitter in
	 * such cases. */
	intel_set_config_compute_mode_changes(set, config);

	ret = intel_modeset_stage_output_state(dev, set, config);
	if (ret)
		goto fail;

	if (config->mode_changed) {
		ret = intel_set_mode(set->crtc, set->mode,
				     set->x, set->y, set->fb);
	} else if (config->fb_changed) {
//       intel_crtc_wait_for_pending_flips(set->crtc);

		ret = intel_pipe_set_base(set->crtc,
					  set->x, set->y, set->fb);
	}

	/* Note: the fail label sits *inside* this if-block, so staging
	 * errors jump straight to the rollback path below. */
	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
			  set->crtc->base.id, ret);
fail:
		intel_set_config_restore_state(dev, config);

		/* Try to restore the config */
		if (config->mode_changed &&
		    intel_set_mode(save_set.crtc, save_set.mode,
				    save_set.x, save_set.y, save_set.fb))
			DRM_ERROR("failed to restore config after modeset failure\n");
	}

out_config:
	intel_set_config_free(config);
	return ret;
}
9350
 
2330 Serge 9351
/* CRTC vtable registered with the DRM core. cursor_set and page_flip are
 * commented out in this port and thus left NULL — presumably unsupported
 * here; confirm before relying on those ioctls. */
static const struct drm_crtc_funcs intel_crtc_funcs = {
//	.cursor_set = intel_crtc_cursor_set,
	.cursor_move = intel_crtc_cursor_move,
	.gamma_set = intel_crtc_gamma_set,
	.set_config = intel_crtc_set_config,
	.destroy = intel_crtc_destroy,
//	.page_flip = intel_crtc_page_flip,
};
2327 Serge 9359
 
3243 Serge 9360
/* Initialize CPU-side (DDI) PLLs; these exist only on DDI-capable hw. */
static void intel_cpu_pll_init(struct drm_device *dev)
{
	if (HAS_DDI(dev))
		intel_ddi_pll_init(dev);
}
9365
 
4104 Serge 9366
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
9367
				      struct intel_shared_dpll *pll,
9368
				      struct intel_dpll_hw_state *hw_state)
3031 serge 9369
{
4104 Serge 9370
	uint32_t val;
3031 serge 9371
 
4104 Serge 9372
	val = I915_READ(PCH_DPLL(pll->id));
9373
	hw_state->dpll = val;
9374
	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
9375
	hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
9376
 
9377
	return val & DPLL_VCO_ENABLE;
9378
}
9379
 
9380
static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
9381
				  struct intel_shared_dpll *pll)
9382
{
9383
	I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0);
9384
	I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1);
9385
}
9386
 
9387
/*
 * Enable a PCH shared DPLL from its cached hw_state. The double write
 * with delays follows the hardware programming sequence and must not be
 * reordered.
 */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	/* PCH refclock must be enabled first */
	assert_pch_refclk_enabled(dev_priv);

	I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}
9408
 
9409
/*
 * Disable a PCH shared DPLL. Asserts that no PCH transcoder still uses
 * this PLL before the control register is cleared.
 */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	/* Make sure no transcoder is still depending on us. */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		if (intel_crtc_to_shared_dpll(crtc) == pll)
			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
	}

	I915_WRITE(PCH_DPLL(pll->id), 0);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}
9425
 
9426
/* Debug names for the two IBX/CPT shared DPLLs, indexed by pll->id. */
static char *ibx_pch_dpll_names[] = {
	"PCH DPLL A",
	"PCH DPLL B",
};
9430
 
9431
static void ibx_pch_dpll_init(struct drm_device *dev)
9432
{
9433
	struct drm_i915_private *dev_priv = dev->dev_private;
9434
	int i;
9435
 
9436
	dev_priv->num_shared_dpll = 2;
9437
 
9438
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
9439
		dev_priv->shared_dplls[i].id = i;
9440
		dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
9441
		dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
9442
		dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
9443
		dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
9444
		dev_priv->shared_dplls[i].get_hw_state =
9445
			ibx_pch_dpll_get_hw_state;
3031 serge 9446
	}
9447
}
9448
 
4104 Serge 9449
static void intel_shared_dpll_init(struct drm_device *dev)
9450
{
9451
	struct drm_i915_private *dev_priv = dev->dev_private;
9452
 
9453
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
9454
		ibx_pch_dpll_init(dev);
9455
	else
9456
		dev_priv->num_shared_dpll = 0;
9457
 
9458
	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
9459
	DRM_DEBUG_KMS("%i shared PLLs initialized\n",
9460
		      dev_priv->num_shared_dpll);
9461
}
9462
 
2330 Serge 9463
static void intel_crtc_init(struct drm_device *dev, int pipe)
9464
{
9465
	drm_i915_private_t *dev_priv = dev->dev_private;
9466
	struct intel_crtc *intel_crtc;
9467
	int i;
2327 Serge 9468
 
2330 Serge 9469
	intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
9470
	if (intel_crtc == NULL)
9471
		return;
2327 Serge 9472
 
2330 Serge 9473
	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
2327 Serge 9474
 
2330 Serge 9475
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
9476
	for (i = 0; i < 256; i++) {
9477
		intel_crtc->lut_r[i] = i;
9478
		intel_crtc->lut_g[i] = i;
9479
		intel_crtc->lut_b[i] = i;
9480
	}
2327 Serge 9481
 
2330 Serge 9482
	/* Swap pipes & planes for FBC on pre-965 */
9483
	intel_crtc->pipe = pipe;
9484
	intel_crtc->plane = pipe;
9485
	if (IS_MOBILE(dev) && IS_GEN3(dev)) {
9486
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
9487
		intel_crtc->plane = !pipe;
9488
	}
2327 Serge 9489
 
2330 Serge 9490
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
9491
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
9492
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
9493
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
2327 Serge 9494
 
2330 Serge 9495
	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
9496
}
2327 Serge 9497
 
3031 serge 9498
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
9499
				struct drm_file *file)
9500
{
9501
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
9502
	struct drm_mode_object *drmmode_obj;
9503
	struct intel_crtc *crtc;
2327 Serge 9504
 
3482 Serge 9505
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
9506
		return -ENODEV;
9507
 
3031 serge 9508
	drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
9509
			DRM_MODE_OBJECT_CRTC);
2327 Serge 9510
 
3031 serge 9511
	if (!drmmode_obj) {
9512
		DRM_ERROR("no such CRTC id\n");
9513
		return -EINVAL;
9514
	}
2327 Serge 9515
 
3031 serge 9516
	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
9517
	pipe_from_crtc_id->pipe = crtc->pipe;
2327 Serge 9518
 
3031 serge 9519
	return 0;
9520
}
2327 Serge 9521
 
3031 serge 9522
static int intel_encoder_clones(struct intel_encoder *encoder)
2330 Serge 9523
{
3031 serge 9524
	struct drm_device *dev = encoder->base.dev;
9525
	struct intel_encoder *source_encoder;
2330 Serge 9526
	int index_mask = 0;
9527
	int entry = 0;
2327 Serge 9528
 
3031 serge 9529
	list_for_each_entry(source_encoder,
9530
			    &dev->mode_config.encoder_list, base.head) {
9531
 
9532
		if (encoder == source_encoder)
2330 Serge 9533
			index_mask |= (1 << entry);
3031 serge 9534
 
9535
		/* Intel hw has only one MUX where enocoders could be cloned. */
9536
		if (encoder->cloneable && source_encoder->cloneable)
9537
			index_mask |= (1 << entry);
9538
 
2330 Serge 9539
		entry++;
9540
	}
2327 Serge 9541
 
2330 Serge 9542
	return index_mask;
9543
}
2327 Serge 9544
 
2330 Serge 9545
static bool has_edp_a(struct drm_device *dev)
9546
{
9547
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 9548
 
2330 Serge 9549
	if (!IS_MOBILE(dev))
9550
		return false;
2327 Serge 9551
 
2330 Serge 9552
	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
9553
		return false;
2327 Serge 9554
 
2330 Serge 9555
	if (IS_GEN5(dev) &&
9556
	    (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
9557
		return false;
2327 Serge 9558
 
2330 Serge 9559
	return true;
9560
}
2327 Serge 9561
 
2330 Serge 9562
/*
 * Probe and register all display outputs for this platform. The probe
 * order and register reads are platform-specific (DDI/Haswell, PCH split,
 * Valleyview, G4X-class, Gen2) and must not be reordered. Afterwards the
 * possible_crtcs/possible_clones masks are filled in for every encoder.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_lvds_init(dev);

	/* ULT parts have no CRT DAC. */
	if (!IS_ULT(dev))
		intel_crt_init(dev);

	if (HAS_DDI(dev)) {
		int found;

		/* Haswell uses DDI functions to detect digital outputs */
		found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
		/* DDI A only supports eDP */
		if (found)
			intel_ddi_init(dev, PORT_A);

		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev, PORT_D);
	} else if (HAS_PCH_SPLIT(dev)) {
		int found;
		dpd_is_edp = intel_dpd_is_edp(dev);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB, true);
			if (!found)
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);

		/* Port D is eDP on some SKUs; skip HDMI there. */
		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev)) {
		/* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
					PORT_C);
			if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C,
					      PORT_C);
		}

		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
					PORT_B);
			if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
		}
	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, GEN3_SDVOB, true);
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
			}

			if (!found && SUPPORTS_INTEGRATED_DP(dev))
				intel_dp_init(dev, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, GEN3_SDVOC, false);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
			}
			if (SUPPORTS_INTEGRATED_DP(dev))
				intel_dp_init(dev, DP_C, PORT_C);
		}

		if (SUPPORTS_INTEGRATED_DP(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev, DP_D, PORT_D);
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);

	/* TV-out init is disabled in this port. */
//   if (SUPPORTS_TV(dev))
//       intel_tv_init(dev);

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev);

	drm_helper_move_panel_connectors_to_head(dev);
}
9686
 
9687
 
9688
 
2335 Serge 9689
/* Framebuffer vtable. Both hooks are commented out in this port and left
 * NULL — presumably user framebuffer destroy/handle export is unsupported
 * here; confirm against the port's memory management. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
//	.destroy = intel_user_framebuffer_destroy,
//	.create_handle = intel_user_framebuffer_create_handle,
};
2327 Serge 9693
 
2335 Serge 9694
int intel_framebuffer_init(struct drm_device *dev,
9695
			   struct intel_framebuffer *intel_fb,
2342 Serge 9696
			   struct drm_mode_fb_cmd2 *mode_cmd,
2335 Serge 9697
			   struct drm_i915_gem_object *obj)
9698
{
4104 Serge 9699
	int pitch_limit;
2335 Serge 9700
	int ret;
2327 Serge 9701
 
3243 Serge 9702
	if (obj->tiling_mode == I915_TILING_Y) {
9703
		DRM_DEBUG("hardware does not support tiling Y\n");
2335 Serge 9704
		return -EINVAL;
3243 Serge 9705
	}
2327 Serge 9706
 
3243 Serge 9707
	if (mode_cmd->pitches[0] & 63) {
9708
		DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
9709
			  mode_cmd->pitches[0]);
9710
		return -EINVAL;
9711
	}
9712
 
4104 Serge 9713
	if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) {
9714
		pitch_limit = 32*1024;
9715
	} else if (INTEL_INFO(dev)->gen >= 4) {
9716
		if (obj->tiling_mode)
9717
			pitch_limit = 16*1024;
9718
		else
9719
			pitch_limit = 32*1024;
9720
	} else if (INTEL_INFO(dev)->gen >= 3) {
9721
		if (obj->tiling_mode)
9722
			pitch_limit = 8*1024;
9723
		else
9724
			pitch_limit = 16*1024;
9725
	} else
9726
		/* XXX DSPC is limited to 4k tiled */
9727
		pitch_limit = 8*1024;
9728
 
9729
	if (mode_cmd->pitches[0] > pitch_limit) {
9730
		DRM_DEBUG("%s pitch (%d) must be at less than %d\n",
9731
			  obj->tiling_mode ? "tiled" : "linear",
9732
			  mode_cmd->pitches[0], pitch_limit);
3243 Serge 9733
		return -EINVAL;
9734
	}
9735
 
9736
	if (obj->tiling_mode != I915_TILING_NONE &&
9737
	    mode_cmd->pitches[0] != obj->stride) {
9738
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
9739
			  mode_cmd->pitches[0], obj->stride);
2335 Serge 9740
			return -EINVAL;
3243 Serge 9741
	}
2327 Serge 9742
 
3243 Serge 9743
	/* Reject formats not supported by any plane early. */
2342 Serge 9744
	switch (mode_cmd->pixel_format) {
3243 Serge 9745
	case DRM_FORMAT_C8:
2342 Serge 9746
	case DRM_FORMAT_RGB565:
9747
	case DRM_FORMAT_XRGB8888:
3243 Serge 9748
	case DRM_FORMAT_ARGB8888:
9749
		break;
9750
	case DRM_FORMAT_XRGB1555:
9751
	case DRM_FORMAT_ARGB1555:
9752
		if (INTEL_INFO(dev)->gen > 3) {
4104 Serge 9753
			DRM_DEBUG("unsupported pixel format: %s\n",
9754
				  drm_get_format_name(mode_cmd->pixel_format));
3243 Serge 9755
			return -EINVAL;
9756
		}
9757
		break;
3031 serge 9758
	case DRM_FORMAT_XBGR8888:
3243 Serge 9759
	case DRM_FORMAT_ABGR8888:
2342 Serge 9760
	case DRM_FORMAT_XRGB2101010:
9761
	case DRM_FORMAT_ARGB2101010:
3243 Serge 9762
	case DRM_FORMAT_XBGR2101010:
9763
	case DRM_FORMAT_ABGR2101010:
9764
		if (INTEL_INFO(dev)->gen < 4) {
4104 Serge 9765
			DRM_DEBUG("unsupported pixel format: %s\n",
9766
				  drm_get_format_name(mode_cmd->pixel_format));
3243 Serge 9767
			return -EINVAL;
9768
		}
2335 Serge 9769
		break;
2342 Serge 9770
	case DRM_FORMAT_YUYV:
9771
	case DRM_FORMAT_UYVY:
9772
	case DRM_FORMAT_YVYU:
9773
	case DRM_FORMAT_VYUY:
3243 Serge 9774
		if (INTEL_INFO(dev)->gen < 5) {
4104 Serge 9775
			DRM_DEBUG("unsupported pixel format: %s\n",
9776
				  drm_get_format_name(mode_cmd->pixel_format));
3243 Serge 9777
			return -EINVAL;
9778
		}
2342 Serge 9779
		break;
2335 Serge 9780
	default:
4104 Serge 9781
		DRM_DEBUG("unsupported pixel format: %s\n",
9782
			  drm_get_format_name(mode_cmd->pixel_format));
2335 Serge 9783
		return -EINVAL;
9784
	}
2327 Serge 9785
 
3243 Serge 9786
	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
9787
	if (mode_cmd->offsets[0] != 0)
9788
		return -EINVAL;
9789
 
3480 Serge 9790
	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
9791
	intel_fb->obj = obj;
9792
 
2335 Serge 9793
	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
9794
	if (ret) {
9795
		DRM_ERROR("framebuffer init failed %d\n", ret);
9796
		return ret;
9797
	}
2327 Serge 9798
 
2335 Serge 9799
	return 0;
9800
}
2327 Serge 9801
 
9802
 
2360 Serge 9803
/* Mode-config vfuncs.  fb_create is NULL in this port: userspace cannot
 * create framebuffers, only the kernel-side fbcon path does. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = NULL /*intel_user_framebuffer_create*/,
	.output_poll_changed = intel_fb_output_poll_changed,
};
2327 Serge 9807
 
3031 serge 9808
/* Set up chip specific display functions */
9809
/* Set up chip specific display functions */
static void intel_init_display(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* DPLL divider search strategy, per platform. */
	if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
		dev_priv->display.find_dpll = g4x_find_best_dpll;
	else if (IS_VALLEYVIEW(dev))
		dev_priv->display.find_dpll = vlv_find_best_dpll;
	else if (IS_PINEVIEW(dev))
		dev_priv->display.find_dpll = pnv_find_best_dpll;
	else
		dev_priv->display.find_dpll = i9xx_find_best_dpll;

	/* CRTC modeset/enable/disable vtable.  Order matters: DDI (Haswell+)
	 * is checked before the generic PCH-split path. */
	if (HAS_DDI(dev)) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
		dev_priv->display.off = haswell_crtc_off;
		dev_priv->display.update_plane = ironlake_update_plane;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
		dev_priv->display.get_clock = ironlake_crtc_clock_get;
		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
		dev_priv->display.off = ironlake_crtc_off;
		dev_priv->display.update_plane = ironlake_update_plane;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_clock = vlv_crtc_clock_get;
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
		dev_priv->display.off = i9xx_crtc_off;
		dev_priv->display.update_plane = i9xx_update_plane;
	} else {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_clock = i9xx_crtc_clock_get;
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
		dev_priv->display.off = i9xx_crtc_off;
		dev_priv->display.update_plane = i9xx_update_plane;
	}

	/* Returns the core display clock speed */
	if (IS_VALLEYVIEW(dev))
		dev_priv->display.get_display_clock_speed =
			valleyview_get_display_clock_speed;
	else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
		dev_priv->display.get_display_clock_speed =
			i945_get_display_clock_speed;
	else if (IS_I915G(dev))
		dev_priv->display.get_display_clock_speed =
			i915_get_display_clock_speed;
	else if (IS_I945GM(dev) || IS_845G(dev))
		dev_priv->display.get_display_clock_speed =
			i9xx_misc_get_display_clock_speed;
	else if (IS_PINEVIEW(dev))
		dev_priv->display.get_display_clock_speed =
			pnv_get_display_clock_speed;
	else if (IS_I915GM(dev))
		dev_priv->display.get_display_clock_speed =
			i915gm_get_display_clock_speed;
	else if (IS_I865G(dev))
		dev_priv->display.get_display_clock_speed =
			i865_get_display_clock_speed;
	else if (IS_I85X(dev))
		dev_priv->display.get_display_clock_speed =
			i855_get_display_clock_speed;
	else /* 852, 830 */
		dev_priv->display.get_display_clock_speed =
			i830_get_display_clock_speed;

	/* FDI link training and audio ELD writing, PCH platforms only
	 * (plus G4X for ELD). */
	if (HAS_PCH_SPLIT(dev)) {
		if (IS_GEN5(dev)) {
			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
			dev_priv->display.write_eld = ironlake_write_eld;
		} else if (IS_GEN6(dev)) {
			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
			dev_priv->display.write_eld = ironlake_write_eld;
		} else if (IS_IVYBRIDGE(dev)) {
			/* FIXME: detect B0+ stepping and use auto training */
			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
			dev_priv->display.write_eld = ironlake_write_eld;
			dev_priv->display.modeset_global_resources =
				ivb_modeset_global_resources;
		} else if (IS_HASWELL(dev)) {
			dev_priv->display.fdi_link_train = hsw_fdi_link_train;
			dev_priv->display.write_eld = haswell_write_eld;
			dev_priv->display.modeset_global_resources =
				haswell_modeset_global_resources;
		}
	} else if (IS_G4X(dev)) {
		dev_priv->display.write_eld = g4x_write_eld;
	}

	/* Default just returns -ENODEV to indicate unsupported */
//	dev_priv->display.queue_flip = intel_default_queue_flip;
	/* NOTE(port): page flipping is disabled in this KolibriOS build. */
}
9914
 
9915
/*
9916
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
9917
 * resume, or other times.  This quirk makes sure that's the case for
9918
 * affected systems.
9919
 */
9920
static void quirk_pipea_force(struct drm_device *dev)
2330 Serge 9921
{
9922
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 9923
 
3031 serge 9924
	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
9925
	DRM_INFO("applying pipe a force quirk\n");
9926
}
2327 Serge 9927
 
3031 serge 9928
/*
9929
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
9930
 */
9931
static void quirk_ssc_force_disable(struct drm_device *dev)
9932
{
9933
	struct drm_i915_private *dev_priv = dev->dev_private;
9934
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
9935
	DRM_INFO("applying lvds SSC disable quirk\n");
2330 Serge 9936
}
2327 Serge 9937
 
3031 serge 9938
/*
9939
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
9940
 * brightness value
9941
 */
9942
static void quirk_invert_brightness(struct drm_device *dev)
2330 Serge 9943
{
9944
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 9945
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
9946
	DRM_INFO("applying inverted panel brightness quirk\n");
9947
}
2327 Serge 9948
 
4104 Serge 9949
/*
9950
 * Some machines (Dell XPS13) suffer broken backlight controls if
9951
 * BLM_PCH_PWM_ENABLE is set.
9952
 */
9953
/*
 * Some machines (Dell XPS13) suffer broken backlight controls if
 * BLM_PCH_PWM_ENABLE is set.
 *
 * NOTE(review): "pcm" in the name is a typo for "pch" (matches the
 * QUIRK_NO_PCH_PWM_ENABLE flag); kept as-is because the intel_quirks
 * table references this symbol.
 */
static void quirk_no_pcm_pwm_enable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
	DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
}
9959
 
3031 serge 9960
/* One PCI-ID-matched workaround entry; PCI_ANY_ID wildcards the
 * subsystem fields.  hook() is invoked when the device matches. */
struct intel_quirk {
	int device;			/* PCI device id */
	int subsystem_vendor;		/* or PCI_ANY_ID */
	int subsystem_device;		/* or PCI_ANY_ID */
	void (*hook)(struct drm_device *dev);
};
2327 Serge 9966
 
3031 serge 9967
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
9968
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);
	const struct dmi_system_id (*dmi_id_list)[];	/* NULL-terminated DMI match table */
};
2327 Serge 9972
 
3031 serge 9973
/* DMI match callback: just log which system matched.  Returning nonzero
 * stops dmi_check_system() from scanning further entries. */
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}
2327 Serge 9978
 
3031 serge 9979
/* DMI-matched quirks: systems identified by vendor/product strings
 * rather than PCI subsystem IDs. */
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				/* Empty PRODUCT_NAME matches all NCR models. */
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};
2327 Serge 9994
 
3031 serge 9995
/* PCI-ID-matched quirk table, scanned by intel_init_quirks().
 * Fields: { device, subsystem_vendor, subsystem_device, hook }. */
static struct intel_quirk intel_quirks[] = {
	/* HP Mini needs pipe A force quirk (LP: #322104) */
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },

	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 830/845 need to leave pipe A & dpll A up */
	{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Dell XPS13 HD Sandy Bridge */
	{ 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
	/* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
	{ 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable },
};
2327 Serge 10035
 
3031 serge 10036
static void intel_init_quirks(struct drm_device *dev)
2330 Serge 10037
{
3031 serge 10038
	struct pci_dev *d = dev->pdev;
10039
	int i;
2327 Serge 10040
 
3031 serge 10041
	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
10042
		struct intel_quirk *q = &intel_quirks[i];
2327 Serge 10043
 
3031 serge 10044
		if (d->device == q->device &&
10045
		    (d->subsystem_vendor == q->subsystem_vendor ||
10046
		     q->subsystem_vendor == PCI_ANY_ID) &&
10047
		    (d->subsystem_device == q->subsystem_device ||
10048
		     q->subsystem_device == PCI_ANY_ID))
10049
			q->hook(dev);
10050
	}
2330 Serge 10051
}
2327 Serge 10052
 
3031 serge 10053
/* Disable the VGA plane that we never use */
10054
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	u32 vga_reg = i915_vgacntrl_reg(dev);	/* platform-specific VGACNTRL location */

	/* Legacy VGA arbitration is skipped in this port (no vgaarb). */
//   vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	/* Set SR01 bit 5 (screen off) via the VGA sequencer index/data ports. */
    out8(SR01, VGA_SR_INDEX);
    sr1 = in8(VGA_SR_DATA);
    out8(sr1 | 1<<5, VGA_SR_DATA);
//   vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
	/* Let the sequencer write settle before touching VGACNTRL. */
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);	/* flush the write */
}
10070
 
3031 serge 10071
/* One-time display hardware bring-up: power well, DDI prep, clock gating,
 * VLV CRI clock, then GT power-saving features.  Call order is part of the
 * hardware init sequence — do not reorder. */
void intel_modeset_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_init_power_well(dev);

	intel_prepare_ddi(dev);

	intel_init_clock_gating(dev);

	/* Enable the CRI clock source so we can get at the display */
	if (IS_VALLEYVIEW(dev))
		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
			   DPLL_INTEGRATED_CRI_CLK_VLV);

	/* RPS/RC6 setup touches GEM state, hence the struct_mutex. */
    mutex_lock(&dev->struct_mutex);
    intel_enable_gt_powersave(dev);
    mutex_unlock(&dev->struct_mutex);
}
10090
 
4398 Serge 10091
/* Thin wrapper so callers outside the display code don't need to know
 * about intel_suspend_hw() directly. */
void intel_modeset_suspend_hw(struct drm_device *dev)
{
	intel_suspend_hw(dev);
}
10095
 
3031 serge 10096
/* Top-level modeset initialization: set up DRM mode config, quirks, power
 * management, per-generation limits, CRTCs/sprites, PLLs, and finally the
 * output connectors.  Bails out early on devices with no display pipes. */
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i, j, ret;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.funcs = &intel_mode_funcs;

	intel_init_quirks(dev);

	intel_init_pm(dev);

	/* Fused-off / display-less SKUs: nothing more to do. */
	if (INTEL_INFO(dev)->num_pipes == 0)
		return;

	intel_init_display(dev);

	/* Maximum framebuffer dimensions by generation. */
	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}
	dev->mode_config.fb_base = dev_priv->gtt.mappable_base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev)->num_pipes,
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");

	/* One CRTC per pipe, plus its sprite planes; sprite init failure is
	 * non-fatal (just logged). */
	for_each_pipe(i) {
		intel_crtc_init(dev, i);
		for (j = 0; j < dev_priv->num_plane; j++) {
			ret = intel_plane_init(dev, i, j);
			if (ret)
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
					      pipe_name(i), sprite_name(i, j), ret);
		}
	}

	intel_cpu_pll_init(dev);
	intel_shared_dpll_init(dev);

	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	/* Just in case the BIOS is doing something questionable. */
	intel_disable_fbc(dev);
}
2330 Serge 10156
 
3031 serge 10157
static void
10158
intel_connector_break_all_links(struct intel_connector *connector)
10159
{
10160
	connector->base.dpms = DRM_MODE_DPMS_OFF;
10161
	connector->base.encoder = NULL;
10162
	connector->encoder->connectors_active = false;
10163
	connector->encoder->base.crtc = NULL;
2330 Serge 10164
}
10165
 
3031 serge 10166
static void intel_enable_pipe_a(struct drm_device *dev)
2330 Serge 10167
{
3031 serge 10168
	struct intel_connector *connector;
10169
	struct drm_connector *crt = NULL;
10170
	struct intel_load_detect_pipe load_detect_temp;
2330 Serge 10171
 
3031 serge 10172
	/* We can't just switch on the pipe A, we need to set things up with a
10173
	 * proper mode and output configuration. As a gross hack, enable pipe A
10174
	 * by enabling the load detect pipe once. */
10175
	list_for_each_entry(connector,
10176
			    &dev->mode_config.connector_list,
10177
			    base.head) {
10178
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
10179
			crt = &connector->base;
10180
			break;
2330 Serge 10181
		}
10182
	}
10183
 
3031 serge 10184
	if (!crt)
10185
		return;
2330 Serge 10186
 
3031 serge 10187
	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp))
10188
		intel_release_load_detect_pipe(crt, &load_detect_temp);
2327 Serge 10189
 
10190
 
10191
}
10192
 
3031 serge 10193
static bool
10194
intel_check_plane_mapping(struct intel_crtc *crtc)
2327 Serge 10195
{
3746 Serge 10196
	struct drm_device *dev = crtc->base.dev;
10197
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 10198
	u32 reg, val;
2327 Serge 10199
 
3746 Serge 10200
	if (INTEL_INFO(dev)->num_pipes == 1)
3031 serge 10201
		return true;
2327 Serge 10202
 
3031 serge 10203
	reg = DSPCNTR(!crtc->plane);
10204
	val = I915_READ(reg);
2327 Serge 10205
 
3031 serge 10206
	if ((val & DISPLAY_PLANE_ENABLE) &&
10207
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
10208
		return false;
2327 Serge 10209
 
3031 serge 10210
	return true;
2327 Serge 10211
}
10212
 
3031 serge 10213
/* Bring one CRTC's software state into agreement with the hardware state
 * read out at boot/resume, fixing up BIOS leftovers along the way.
 * Statement order is deliberate: plane mapping must be sanitized before
 * any DPMS adjustment. */
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg;

	/* Clear any frame start delays used for debugging left by the BIOS */
	reg = PIPECONF(crtc->config.cpu_transcoder);
	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);

	/* We need to sanitize the plane -> pipe mapping first because this will
	 * disable the crtc (and hence change the state) if it is wrong. Note
	 * that gen4+ has a fixed plane -> pipe mapping.  */
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
		struct intel_connector *connector;
		bool plane;

		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
			      crtc->base.base.id);

		/* Pipe has the wrong plane attached and the plane is active.
		 * Temporarily change the plane mapping and disable everything
		 * ...  */
		plane = crtc->plane;
		crtc->plane = !plane;
		dev_priv->display.crtc_disable(&crtc->base);
		crtc->plane = plane;

		/* ... and break all links. */
		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    base.head) {
			if (connector->encoder->base.crtc != &crtc->base)
				continue;

			intel_connector_break_all_links(connector);
		}

		WARN_ON(crtc->active);
		crtc->base.enabled = false;
	}

	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
	    crtc->pipe == PIPE_A && !crtc->active) {
		/* BIOS forgot to enable pipe A, this mostly happens after
		 * resume. Force-enable the pipe to fix this, the update_dpms
		 * call below we restore the pipe to the right state, but leave
		 * the required bits on. */
		intel_enable_pipe_a(dev);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	intel_crtc_update_dpms(&crtc->base);

	if (crtc->active != crtc->base.enabled) {
		struct intel_encoder *encoder;

		/* This can happen either due to bugs in the get_hw_state
		 * functions or because the pipe is force-enabled due to the
		 * pipe A quirk. */
		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
			      crtc->base.base.id,
			      crtc->base.enabled ? "enabled" : "disabled",
			      crtc->active ? "enabled" : "disabled");

		crtc->base.enabled = crtc->active;

		/* Because we only establish the connector -> encoder ->
		 * crtc links if something is active, this means the
		 * crtc is now deactivated. Break the links. connector
		 * -> encoder links are only establish when things are
		 *  actually up, hence no need to break them. */
		WARN_ON(crtc->active);

		for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
			WARN_ON(encoder->connectors_active);
			encoder->base.crtc = NULL;
		}
	}
}
10293
 
3031 serge 10294
/* Fix up an encoder whose software state disagrees with the hardware:
 * an encoder claiming active connectors but driving no active pipe is
 * shut down and its connector links are broken. */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;
	struct drm_device *dev = encoder->base.dev;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	if (encoder->connectors_active && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      drm_get_encoder_name(&encoder->base));

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      drm_get_encoder_name(&encoder->base));
			encoder->disable(encoder);
		}

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */
		list_for_each_entry(connector,
				    &dev->mode_config.connector_list,
				    base.head) {
			if (connector->encoder != encoder)
				continue;

			intel_connector_break_all_links(connector);
		}
	}
	/* Enabled encoders without active connectors will be fixed in
	 * the crtc fixup. */
}
10336
 
3746 Serge 10337
void i915_redisable_vga(struct drm_device *dev)
10338
{
10339
	struct drm_i915_private *dev_priv = dev->dev_private;
10340
	u32 vga_reg = i915_vgacntrl_reg(dev);
10341
 
4104 Serge 10342
	/* This function can be called both from intel_modeset_setup_hw_state or
10343
	 * at a very early point in our resume sequence, where the power well
10344
	 * structures are not yet restored. Since this function is at a very
10345
	 * paranoid "someone might have enabled VGA while we were not looking"
10346
	 * level, just check if the power well is enabled instead of trying to
10347
	 * follow the "don't touch the power well if we don't need it" policy
10348
	 * the rest of the driver uses. */
10349
	if (HAS_POWER_WELL(dev) &&
10350
	    (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
10351
		return;
10352
 
3746 Serge 10353
	if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
10354
		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
10355
		i915_disable_vga(dev);
10356
	}
10357
}
10358
 
4104 Serge 10359
/* Read the current display hardware state into the driver's tracking
 * structures: per-CRTC pipe config, shared DPLL usage/refcounts, encoder
 * pipe bindings, pixel clocks, and connector DPMS/encoder links. */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	int i;

	/* Per-CRTC: read the pipe config fresh from hardware. */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
			    base.head) {
		memset(&crtc->config, 0, sizeof(crtc->config));

		crtc->active = dev_priv->display.get_pipe_config(crtc,
								 &crtc->config);

		crtc->base.enabled = crtc->active;

		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
			      crtc->base.base.id,
			      crtc->active ? "enabled" : "disabled");
	}

	/* FIXME: Smash this into the new shared dpll infrastructure. */
	if (HAS_DDI(dev))
		intel_ddi_setup_hw_pll_state(dev);

	/* Shared DPLLs: recover on/off state and count active users. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state);
		pll->active = 0;
		list_for_each_entry(crtc, &dev->mode_config.crtc_list,
				    base.head) {
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
				pll->active++;
		}
		pll->refcount = pll->active;

		DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
			      pll->name, pll->refcount, pll->on);
	}

	/* Encoders: bind each active encoder to the pipe it drives. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
			encoder->base.crtc = &crtc->base;
			if (encoder->get_config)
				encoder->get_config(encoder, &crtc->config);
		} else {
			encoder->base.crtc = NULL;
		}

		encoder->connectors_active = false;
		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n",
			      encoder->base.base.id,
			      drm_get_encoder_name(&encoder->base),
			      encoder->base.crtc ? "enabled" : "disabled",
			      pipe);
	}

	/* Pixel clock readout needs the per-platform hook (not all
	 * platforms provide one). */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
			    base.head) {
		if (!crtc->active)
			continue;
		if (dev_priv->display.get_clock)
			dev_priv->display.get_clock(crtc,
						    &crtc->config);
	}

	/* Connectors: DPMS state and connector -> encoder links. */
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;
			connector->encoder->connectors_active = true;
			connector->base.encoder = &connector->encoder->base;
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id,
			      drm_get_connector_name(&connector->base),
			      connector->base.encoder ? "enabled" : "disabled");
	}
}
2332 Serge 10448
 
4104 Serge 10449
/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
10450
 * and i915 state tracking structures. */
10451
/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
 * and i915 state tracking structures.  With @force_restore the read-out
 * configuration is immediately re-programmed through the raw modeset path;
 * otherwise only the staged output state is updated. */
void intel_modeset_setup_hw_state(struct drm_device *dev,
				  bool force_restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct drm_plane *plane;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	int i;

	intel_modeset_readout_hw_state(dev);

	/*
	 * Now that we have the config, copy it to each CRTC struct
	 * Note that this could go away if we move to using crtc_config
	 * checking everywhere.
	 */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
			    base.head) {
		if (crtc->active && i915_fastboot) {
			intel_crtc_mode_from_pipe_config(crtc, &crtc->config);

			DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
				      crtc->base.base.id);
			drm_mode_debug_printmodeline(&crtc->base.mode);
		}
	}

	/* HW state is read out, now we need to sanitize this mess. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		intel_sanitize_encoder(encoder);
	}

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		intel_sanitize_crtc(crtc);
		intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
	}

	/* Turn off shared DPLLs the BIOS left on with no user. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);

		pll->disable(dev_priv, pll);
		pll->on = false;
	}

	if (force_restore) {
		/*
		 * We need to use raw interfaces for restoring state to avoid
		 * checking (bogus) intermediate states.
		 */
		for_each_pipe(pipe) {
			struct drm_crtc *crtc =
				dev_priv->pipe_to_crtc_mapping[pipe];

			__intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
					 crtc->fb);
		}
		list_for_each_entry(plane, &dev->mode_config.plane_list, head)
			intel_plane_restore(plane);

		i915_redisable_vga(dev);
	} else {
		intel_modeset_update_staged_output_state(dev);
	}

	intel_modeset_check_state(dev);

	drm_mode_config_reset(dev);
}
10527
 
3031 serge 10528
/*
 * One-time modeset/GEM bring-up: initialize the hardware-dependent modeset
 * state, then read back and sanitize whatever configuration the firmware
 * left active on the display hardware.
 */
void intel_modeset_gem_init(struct drm_device *dev)
{
	intel_modeset_init_hw(dev);

	/* Overlay support is not wired up in this port. */
//   intel_setup_overlay(dev);

	/* Take the modeset lock around the initial HW state read-out.
	 * force_restore == false: keep the configuration found on the
	 * hardware instead of forcing a mode set. */
	mutex_lock(&dev->mode_config.mutex);
	intel_modeset_setup_hw_state(dev, false);
	mutex_unlock(&dev->mode_config.mutex);
}
10538
 
3031 serge 10539
/*
 * Tear down the modeset state. NOTE(review): the entire body is compiled
 * out with "#if 0" in this port, so this function is currently a no-op;
 * the code below is kept as a reference to the upstream shutdown sequence.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
#if 0
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning of rps, connectors, ...) would
	 * experience fancy races otherwise.
	 */
	drm_irq_uninstall(dev);
	cancel_work_sync(&dev_priv->hotplug_work);
	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
//   drm_kms_helper_poll_fini(dev);

	mutex_lock(&dev->struct_mutex);

//   intel_unregister_dsm_handler();

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/* Skip inactive CRTCs */
		if (!crtc->fb)
			continue;

		intel_increase_pllclock(crtc);
	}

	intel_disable_fbc(dev);

	intel_disable_gt_powersave(dev);

	ironlake_teardown_rc6(dev);

	mutex_unlock(&dev->struct_mutex);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	/* destroy backlight, if any, before the connectors */
	intel_panel_destroy_backlight(dev);

	drm_mode_config_cleanup(dev);
#endif
}
10587
 
10588
/*
3031 serge 10589
 * Return which encoder is currently attached for connector.
2327 Serge 10590
 */
3031 serge 10591
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
2327 Serge 10592
{
3031 serge 10593
	return &intel_attached_encoder(connector)->base;
10594
}
2327 Serge 10595
 
3031 serge 10596
void intel_connector_attach_encoder(struct intel_connector *connector,
10597
				    struct intel_encoder *encoder)
10598
{
10599
	connector->encoder = encoder;
10600
	drm_mode_connector_attach_encoder(&connector->base,
10601
					  &encoder->base);
2327 Serge 10602
}
10603
 
10604
/*
3031 serge 10605
 * set vga decode state - true == enable VGA decode
2327 Serge 10606
 */
3031 serge 10607
/*
 * set vga decode state - true == enable VGA decode
 */
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned gmch_ctrl_reg;
	u16 ctrl;

	/* SNB+ moved the GMCH control word to a different config offset. */
	gmch_ctrl_reg = INTEL_INFO(dev)->gen >= 6 ?
		SNB_GMCH_CTRL : INTEL_GMCH_CTRL;

	pci_read_config_word(dev_priv->bridge_dev, gmch_ctrl_reg, &ctrl);
	if (!state)
		ctrl |= INTEL_GMCH_VGA_DISABLE;
	else
		ctrl &= ~INTEL_GMCH_VGA_DISABLE;
	pci_write_config_word(dev_priv->bridge_dev, gmch_ctrl_reg, ctrl);

	return 0;
}
10621
 
3031 serge 10622
#ifdef CONFIG_DEBUG_FS
10623
#include 
2327 Serge 10624
 
3031 serge 10625
/*
 * Snapshot of display-controller registers captured at GPU error time by
 * intel_display_capture_error_state() and dumped later by
 * intel_display_print_error_state().
 */
struct intel_display_error_state {

	u32 power_well_driver;	/* HSW_PWR_WELL_DRIVER; written only when HAS_POWER_WELL */

	int num_transcoders;	/* number of valid entries in transcoder[] below */

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;	/* NOTE(review): never written by the capture path */
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		u32 source;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;	/* gen <= 3 only */
		u32 pos;	/* gen <= 3 only */
		u32 addr;	/* gen <= 7, non-HSW only */
		u32 surface;	/* gen >= 4 only */
		u32 tile_offset;	/* gen >= 4 only */
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];	/* up to I915_MAX_PIPES pipes + eDP transcoder */
};
2327 Serge 10665
 
3031 serge 10666
struct intel_display_error_state *
10667
intel_display_capture_error_state(struct drm_device *dev)
10668
{
10669
	drm_i915_private_t *dev_priv = dev->dev_private;
10670
	struct intel_display_error_state *error;
4104 Serge 10671
	int transcoders[] = {
10672
		TRANSCODER_A,
10673
		TRANSCODER_B,
10674
		TRANSCODER_C,
10675
		TRANSCODER_EDP,
10676
	};
3031 serge 10677
	int i;
2327 Serge 10678
 
4104 Serge 10679
	if (INTEL_INFO(dev)->num_pipes == 0)
10680
		return NULL;
10681
 
3031 serge 10682
	error = kmalloc(sizeof(*error), GFP_ATOMIC);
10683
	if (error == NULL)
10684
		return NULL;
2327 Serge 10685
 
4104 Serge 10686
	if (HAS_POWER_WELL(dev))
10687
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
10688
 
3031 serge 10689
	for_each_pipe(i) {
3746 Serge 10690
		if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
3031 serge 10691
		error->cursor[i].control = I915_READ(CURCNTR(i));
10692
		error->cursor[i].position = I915_READ(CURPOS(i));
10693
		error->cursor[i].base = I915_READ(CURBASE(i));
3746 Serge 10694
		} else {
10695
			error->cursor[i].control = I915_READ(CURCNTR_IVB(i));
10696
			error->cursor[i].position = I915_READ(CURPOS_IVB(i));
10697
			error->cursor[i].base = I915_READ(CURBASE_IVB(i));
10698
		}
2327 Serge 10699
 
3031 serge 10700
		error->plane[i].control = I915_READ(DSPCNTR(i));
10701
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
3746 Serge 10702
		if (INTEL_INFO(dev)->gen <= 3) {
3031 serge 10703
		error->plane[i].size = I915_READ(DSPSIZE(i));
10704
		error->plane[i].pos = I915_READ(DSPPOS(i));
3746 Serge 10705
		}
10706
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
3031 serge 10707
		error->plane[i].addr = I915_READ(DSPADDR(i));
10708
		if (INTEL_INFO(dev)->gen >= 4) {
10709
			error->plane[i].surface = I915_READ(DSPSURF(i));
10710
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
10711
		}
2327 Serge 10712
 
3031 serge 10713
		error->pipe[i].source = I915_READ(PIPESRC(i));
10714
	}
2327 Serge 10715
 
4104 Serge 10716
	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
10717
	if (HAS_DDI(dev_priv->dev))
10718
		error->num_transcoders++; /* Account for eDP. */
10719
 
10720
	for (i = 0; i < error->num_transcoders; i++) {
10721
		enum transcoder cpu_transcoder = transcoders[i];
10722
 
10723
		error->transcoder[i].cpu_transcoder = cpu_transcoder;
10724
 
10725
		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
10726
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
10727
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
10728
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
10729
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
10730
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
10731
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
10732
	}
10733
 
10734
	/* In the code above we read the registers without checking if the power
10735
	 * well was on, so here we have to clear the FPGA_DBG_RM_NOCLAIM bit to
10736
	 * prevent the next I915_WRITE from detecting it and printing an error
10737
	 * message. */
10738
	intel_uncore_clear_errors(dev);
10739
 
3031 serge 10740
	return error;
2330 Serge 10741
}
2327 Serge 10742
 
4104 Serge 10743
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
10744
 
3031 serge 10745
void
4104 Serge 10746
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
3031 serge 10747
				struct drm_device *dev,
10748
				struct intel_display_error_state *error)
2332 Serge 10749
{
3031 serge 10750
	int i;
2330 Serge 10751
 
4104 Serge 10752
	if (!error)
10753
		return;
10754
 
10755
	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
10756
	if (HAS_POWER_WELL(dev))
10757
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
10758
			   error->power_well_driver);
3031 serge 10759
	for_each_pipe(i) {
4104 Serge 10760
		err_printf(m, "Pipe [%d]:\n", i);
10761
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
2332 Serge 10762
 
4104 Serge 10763
		err_printf(m, "Plane [%d]:\n", i);
10764
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
10765
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
3746 Serge 10766
		if (INTEL_INFO(dev)->gen <= 3) {
4104 Serge 10767
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
10768
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
3746 Serge 10769
		}
10770
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
4104 Serge 10771
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
3031 serge 10772
		if (INTEL_INFO(dev)->gen >= 4) {
4104 Serge 10773
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
10774
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
3031 serge 10775
		}
2332 Serge 10776
 
4104 Serge 10777
		err_printf(m, "Cursor [%d]:\n", i);
10778
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
10779
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
10780
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
3031 serge 10781
	}
4104 Serge 10782
 
10783
	for (i = 0; i < error->num_transcoders; i++) {
10784
		err_printf(m, "  CPU transcoder: %c\n",
10785
			   transcoder_name(error->transcoder[i].cpu_transcoder));
10786
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
10787
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
10788
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
10789
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
10790
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
10791
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
10792
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
10793
	}
2327 Serge 10794
}
3031 serge 10795
#endif