Subversion Repositories Kolibri OS

Rev

Rev 2330 | Go to most recent revision | Details | Last modification | View Log | RSS feed

Rev Author Line No. Line
2327 Serge 1
/*
2
 * Copyright © 2006-2007 Intel Corporation
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21
 * DEALINGS IN THE SOFTWARE.
22
 *
23
 * Authors:
24
 *  Eric Anholt 
25
 */
26
 
27
//#include 
28
#include 
29
//#include 
30
#include 
31
#include 
32
//#include 
33
//#include 
34
#include "drmP.h"
35
#include "intel_drv.h"
36
#include "i915_drv.h"
37
//#include "i915_trace.h"
38
#include "drm_dp_helper.h"
39
 
40
#include "drm_crtc_helper.h"
41
 
42
#include 
43
 
44
phys_addr_t get_bus_addr(void);
45
 
46
/* Report whether @n is an exact power of two; zero is not a power of two. */
static inline __attribute__((const))
bool is_power_of_2(unsigned long n)
{
    if (n == 0)
        return false;
    /* A power of two has exactly one bit set, so n & (n-1) clears it. */
    return (n & (n - 1)) == 0;
}
51
 
52
/*
 * Compatibility shim for the Linux PCI accessor: read a 16-bit value from
 * the device's PCI configuration space at offset @where using the KolibriOS
 * PciRead16 primitive.
 *
 * NOTE(review): always returns 1, whereas the Linux API returns 0 on
 * success -- confirm that no caller in this port tests the return value.
 */
static inline int pci_read_config_word(struct pci_dev *dev, int where,
                    u16 *val)
{
    *val = PciRead16(dev->busnr, dev->devfn, where);
    return 1;
}
58
 
59
 
60
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
61
 
62
bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
63
static void intel_update_watermarks(struct drm_device *dev);
64
static void intel_increase_pllclock(struct drm_crtc *crtc);
65
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
66
 
67
/*
 * One candidate PLL configuration: the raw divisor values being tried,
 * plus the clock values derived from them by intel_clock() or
 * pineview_clock().
 */
typedef struct {
    /* given values */
    int n;
    int m1, m2;
    int p1, p2;
    /* derived values */
    int dot;
    int vco;
    int m;
    int p;
} intel_clock_t;
78
 
79
/* Inclusive [min, max] bound for a single PLL divisor. */
typedef struct {
    int min, max;
} intel_range_t;
82
 
83
/*
 * p2 post-divider selection: the find_pll routines use p2_slow for target
 * dot clocks below dot_limit and p2_fast at or above it (LVDS instead
 * chooses by the panel's current single/dual-channel state).
 */
typedef struct {
    int dot_limit;
    int p2_slow, p2_fast;
} intel_p2_t;
87
 
88
#define INTEL_P2_NUM              2
89
typedef struct intel_limit intel_limit_t;
/*
 * Per-platform/per-output PLL constraint table: the legal range for each
 * divisor and derived value, the p2 selection rule, and the search routine
 * used to pick divisors against this table.
 */
struct intel_limit {
    intel_range_t   dot, vco, n, m, m1, m2, p, p1;
    intel_p2_t      p2;
    bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
              int, int, intel_clock_t *);
};
96
 
97
/* FDI */
98
#define IRONLAKE_FDI_FREQ       2700000 /* in kHz for mode->clock */
99
 
100
static bool
101
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
102
            int target, int refclk, intel_clock_t *best_clock);
103
static bool
104
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
105
            int target, int refclk, intel_clock_t *best_clock);
106
 
107
static bool
108
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
109
              int target, int refclk, intel_clock_t *best_clock);
110
static bool
111
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
112
               int target, int refclk, intel_clock_t *best_clock);
113
 
114
static inline u32 /* units of 100MHz */
115
intel_fdi_link_freq(struct drm_device *dev)
116
{
117
	if (IS_GEN5(dev)) {
118
		struct drm_i915_private *dev_priv = dev->dev_private;
119
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
120
	} else
121
		return 27;
122
}
123
 
124
static const intel_limit_t intel_limits_i8xx_dvo = {
125
        .dot = { .min = 25000, .max = 350000 },
126
        .vco = { .min = 930000, .max = 1400000 },
127
        .n = { .min = 3, .max = 16 },
128
        .m = { .min = 96, .max = 140 },
129
        .m1 = { .min = 18, .max = 26 },
130
        .m2 = { .min = 6, .max = 16 },
131
        .p = { .min = 4, .max = 128 },
132
        .p1 = { .min = 2, .max = 33 },
133
	.p2 = { .dot_limit = 165000,
134
		.p2_slow = 4, .p2_fast = 2 },
135
	.find_pll = intel_find_best_PLL,
136
};
137
 
138
static const intel_limit_t intel_limits_i8xx_lvds = {
139
        .dot = { .min = 25000, .max = 350000 },
140
        .vco = { .min = 930000, .max = 1400000 },
141
        .n = { .min = 3, .max = 16 },
142
        .m = { .min = 96, .max = 140 },
143
        .m1 = { .min = 18, .max = 26 },
144
        .m2 = { .min = 6, .max = 16 },
145
        .p = { .min = 4, .max = 128 },
146
        .p1 = { .min = 1, .max = 6 },
147
	.p2 = { .dot_limit = 165000,
148
		.p2_slow = 14, .p2_fast = 7 },
149
	.find_pll = intel_find_best_PLL,
150
};
151
 
152
static const intel_limit_t intel_limits_i9xx_sdvo = {
153
        .dot = { .min = 20000, .max = 400000 },
154
        .vco = { .min = 1400000, .max = 2800000 },
155
        .n = { .min = 1, .max = 6 },
156
        .m = { .min = 70, .max = 120 },
157
        .m1 = { .min = 10, .max = 22 },
158
        .m2 = { .min = 5, .max = 9 },
159
        .p = { .min = 5, .max = 80 },
160
        .p1 = { .min = 1, .max = 8 },
161
	.p2 = { .dot_limit = 200000,
162
		.p2_slow = 10, .p2_fast = 5 },
163
	.find_pll = intel_find_best_PLL,
164
};
165
 
166
static const intel_limit_t intel_limits_i9xx_lvds = {
167
        .dot = { .min = 20000, .max = 400000 },
168
        .vco = { .min = 1400000, .max = 2800000 },
169
        .n = { .min = 1, .max = 6 },
170
        .m = { .min = 70, .max = 120 },
171
        .m1 = { .min = 10, .max = 22 },
172
        .m2 = { .min = 5, .max = 9 },
173
        .p = { .min = 7, .max = 98 },
174
        .p1 = { .min = 1, .max = 8 },
175
	.p2 = { .dot_limit = 112000,
176
		.p2_slow = 14, .p2_fast = 7 },
177
	.find_pll = intel_find_best_PLL,
178
};
179
 
180
 
181
static const intel_limit_t intel_limits_g4x_sdvo = {
182
	.dot = { .min = 25000, .max = 270000 },
183
	.vco = { .min = 1750000, .max = 3500000},
184
	.n = { .min = 1, .max = 4 },
185
	.m = { .min = 104, .max = 138 },
186
	.m1 = { .min = 17, .max = 23 },
187
	.m2 = { .min = 5, .max = 11 },
188
	.p = { .min = 10, .max = 30 },
189
	.p1 = { .min = 1, .max = 3},
190
	.p2 = { .dot_limit = 270000,
191
		.p2_slow = 10,
192
		.p2_fast = 10
193
	},
194
	.find_pll = intel_g4x_find_best_PLL,
195
};
196
 
197
static const intel_limit_t intel_limits_g4x_hdmi = {
198
	.dot = { .min = 22000, .max = 400000 },
199
	.vco = { .min = 1750000, .max = 3500000},
200
	.n = { .min = 1, .max = 4 },
201
	.m = { .min = 104, .max = 138 },
202
	.m1 = { .min = 16, .max = 23 },
203
	.m2 = { .min = 5, .max = 11 },
204
	.p = { .min = 5, .max = 80 },
205
	.p1 = { .min = 1, .max = 8},
206
	.p2 = { .dot_limit = 165000,
207
		.p2_slow = 10, .p2_fast = 5 },
208
	.find_pll = intel_g4x_find_best_PLL,
209
};
210
 
211
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
212
	.dot = { .min = 20000, .max = 115000 },
213
	.vco = { .min = 1750000, .max = 3500000 },
214
	.n = { .min = 1, .max = 3 },
215
	.m = { .min = 104, .max = 138 },
216
	.m1 = { .min = 17, .max = 23 },
217
	.m2 = { .min = 5, .max = 11 },
218
	.p = { .min = 28, .max = 112 },
219
	.p1 = { .min = 2, .max = 8 },
220
	.p2 = { .dot_limit = 0,
221
		.p2_slow = 14, .p2_fast = 14
222
	},
223
	.find_pll = intel_g4x_find_best_PLL,
224
};
225
 
226
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
227
	.dot = { .min = 80000, .max = 224000 },
228
	.vco = { .min = 1750000, .max = 3500000 },
229
	.n = { .min = 1, .max = 3 },
230
	.m = { .min = 104, .max = 138 },
231
	.m1 = { .min = 17, .max = 23 },
232
	.m2 = { .min = 5, .max = 11 },
233
	.p = { .min = 14, .max = 42 },
234
	.p1 = { .min = 2, .max = 6 },
235
	.p2 = { .dot_limit = 0,
236
		.p2_slow = 7, .p2_fast = 7
237
	},
238
	.find_pll = intel_g4x_find_best_PLL,
239
};
240
 
241
static const intel_limit_t intel_limits_g4x_display_port = {
242
        .dot = { .min = 161670, .max = 227000 },
243
        .vco = { .min = 1750000, .max = 3500000},
244
        .n = { .min = 1, .max = 2 },
245
        .m = { .min = 97, .max = 108 },
246
        .m1 = { .min = 0x10, .max = 0x12 },
247
        .m2 = { .min = 0x05, .max = 0x06 },
248
        .p = { .min = 10, .max = 20 },
249
        .p1 = { .min = 1, .max = 2},
250
        .p2 = { .dot_limit = 0,
251
		.p2_slow = 10, .p2_fast = 10 },
252
        .find_pll = intel_find_pll_g4x_dp,
253
};
254
 
255
static const intel_limit_t intel_limits_pineview_sdvo = {
256
        .dot = { .min = 20000, .max = 400000},
257
        .vco = { .min = 1700000, .max = 3500000 },
258
	/* Pineview's Ncounter is a ring counter */
259
        .n = { .min = 3, .max = 6 },
260
        .m = { .min = 2, .max = 256 },
261
	/* Pineview only has one combined m divider, which we treat as m2. */
262
        .m1 = { .min = 0, .max = 0 },
263
        .m2 = { .min = 0, .max = 254 },
264
        .p = { .min = 5, .max = 80 },
265
        .p1 = { .min = 1, .max = 8 },
266
	.p2 = { .dot_limit = 200000,
267
		.p2_slow = 10, .p2_fast = 5 },
268
	.find_pll = intel_find_best_PLL,
269
};
270
 
271
static const intel_limit_t intel_limits_pineview_lvds = {
272
        .dot = { .min = 20000, .max = 400000 },
273
        .vco = { .min = 1700000, .max = 3500000 },
274
        .n = { .min = 3, .max = 6 },
275
        .m = { .min = 2, .max = 256 },
276
        .m1 = { .min = 0, .max = 0 },
277
        .m2 = { .min = 0, .max = 254 },
278
        .p = { .min = 7, .max = 112 },
279
        .p1 = { .min = 1, .max = 8 },
280
	.p2 = { .dot_limit = 112000,
281
		.p2_slow = 14, .p2_fast = 14 },
282
	.find_pll = intel_find_best_PLL,
283
};
284
 
285
/* Ironlake / Sandybridge
286
 *
287
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
288
 * the range value for them is (actual_value - 2).
289
 */
290
static const intel_limit_t intel_limits_ironlake_dac = {
291
	.dot = { .min = 25000, .max = 350000 },
292
	.vco = { .min = 1760000, .max = 3510000 },
293
	.n = { .min = 1, .max = 5 },
294
	.m = { .min = 79, .max = 127 },
295
	.m1 = { .min = 12, .max = 22 },
296
	.m2 = { .min = 5, .max = 9 },
297
	.p = { .min = 5, .max = 80 },
298
	.p1 = { .min = 1, .max = 8 },
299
	.p2 = { .dot_limit = 225000,
300
		.p2_slow = 10, .p2_fast = 5 },
301
	.find_pll = intel_g4x_find_best_PLL,
302
};
303
 
304
static const intel_limit_t intel_limits_ironlake_single_lvds = {
305
	.dot = { .min = 25000, .max = 350000 },
306
	.vco = { .min = 1760000, .max = 3510000 },
307
	.n = { .min = 1, .max = 3 },
308
	.m = { .min = 79, .max = 118 },
309
	.m1 = { .min = 12, .max = 22 },
310
	.m2 = { .min = 5, .max = 9 },
311
	.p = { .min = 28, .max = 112 },
312
	.p1 = { .min = 2, .max = 8 },
313
	.p2 = { .dot_limit = 225000,
314
		.p2_slow = 14, .p2_fast = 14 },
315
	.find_pll = intel_g4x_find_best_PLL,
316
};
317
 
318
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
319
	.dot = { .min = 25000, .max = 350000 },
320
	.vco = { .min = 1760000, .max = 3510000 },
321
	.n = { .min = 1, .max = 3 },
322
	.m = { .min = 79, .max = 127 },
323
	.m1 = { .min = 12, .max = 22 },
324
	.m2 = { .min = 5, .max = 9 },
325
	.p = { .min = 14, .max = 56 },
326
	.p1 = { .min = 2, .max = 8 },
327
	.p2 = { .dot_limit = 225000,
328
		.p2_slow = 7, .p2_fast = 7 },
329
	.find_pll = intel_g4x_find_best_PLL,
330
};
331
 
332
/* LVDS 100mhz refclk limits. */
333
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
334
	.dot = { .min = 25000, .max = 350000 },
335
	.vco = { .min = 1760000, .max = 3510000 },
336
	.n = { .min = 1, .max = 2 },
337
	.m = { .min = 79, .max = 126 },
338
	.m1 = { .min = 12, .max = 22 },
339
	.m2 = { .min = 5, .max = 9 },
340
	.p = { .min = 28, .max = 112 },
341
	.p1 = { .min = 2,.max = 8 },
342
	.p2 = { .dot_limit = 225000,
343
		.p2_slow = 14, .p2_fast = 14 },
344
	.find_pll = intel_g4x_find_best_PLL,
345
};
346
 
347
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
348
	.dot = { .min = 25000, .max = 350000 },
349
	.vco = { .min = 1760000, .max = 3510000 },
350
	.n = { .min = 1, .max = 3 },
351
	.m = { .min = 79, .max = 126 },
352
	.m1 = { .min = 12, .max = 22 },
353
	.m2 = { .min = 5, .max = 9 },
354
	.p = { .min = 14, .max = 42 },
355
	.p1 = { .min = 2,.max = 6 },
356
	.p2 = { .dot_limit = 225000,
357
		.p2_slow = 7, .p2_fast = 7 },
358
	.find_pll = intel_g4x_find_best_PLL,
359
};
360
 
361
static const intel_limit_t intel_limits_ironlake_display_port = {
362
        .dot = { .min = 25000, .max = 350000 },
363
        .vco = { .min = 1760000, .max = 3510000},
364
        .n = { .min = 1, .max = 2 },
365
        .m = { .min = 81, .max = 90 },
366
        .m1 = { .min = 12, .max = 22 },
367
        .m2 = { .min = 5, .max = 9 },
368
        .p = { .min = 10, .max = 20 },
369
        .p1 = { .min = 1, .max = 2},
370
        .p2 = { .dot_limit = 0,
371
		.p2_slow = 10, .p2_fast = 10 },
372
        .find_pll = intel_find_pll_ironlake_dp,
373
};
374
 
375
/*
 * Select the PLL limit table for an Ironlake/Sandybridge CRTC: LVDS picks
 * a single- vs dual-channel table (further split by 100MHz refclk), DP/eDP
 * uses the fixed display-port table, everything else the DAC table.
 */
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
						int refclk)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		/* Channel state is read back from the live PCH_LVDS register. */
		if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP) {
			/* LVDS dual channel */
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			/* LVDS single channel */
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
			HAS_eDP)
		limit = &intel_limits_ironlake_display_port;
	else
		limit = &intel_limits_ironlake_dac;

	return limit;
}
404
 
405
/*
 * Select the PLL limit table for a G4x CRTC by output type; LVDS further
 * splits on the panel's current single/dual-channel state read from the
 * LVDS register.
 */
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			/* LVDS with dual channel */
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			/* LVDS with single channel */
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else if (intel_pipe_has_type (crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		limit = &intel_limits_g4x_display_port;
	} else /* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;

	return limit;
}
431
 
432
static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
433
{
434
	struct drm_device *dev = crtc->dev;
435
	const intel_limit_t *limit;
436
 
437
	if (HAS_PCH_SPLIT(dev))
438
		limit = intel_ironlake_limit(crtc, refclk);
439
	else if (IS_G4X(dev)) {
440
		limit = intel_g4x_limit(crtc);
441
	} else if (IS_PINEVIEW(dev)) {
442
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
443
			limit = &intel_limits_pineview_lvds;
444
		else
445
			limit = &intel_limits_pineview_sdvo;
446
	} else if (!IS_GEN2(dev)) {
447
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
448
			limit = &intel_limits_i9xx_lvds;
449
		else
450
			limit = &intel_limits_i9xx_sdvo;
451
	} else {
452
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
453
			limit = &intel_limits_i8xx_lvds;
454
		else
455
			limit = &intel_limits_i8xx_dvo;
456
	}
457
	return limit;
458
}
459
 
460
/* m1 is reserved as 0 in Pineview, n is a ring counter */
461
/*
 * Fill in the derived clock values for Pineview, which has a single
 * combined m divider (stored in m2) and divides vco by n directly.
 */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
	int m = clock->m2 + 2;
	int p = clock->p1 * clock->p2;

	clock->m = m;
	clock->p = p;
	clock->vco = refclk * m / clock->n;
	clock->dot = clock->vco / p;
}
468
 
469
/*
 * Compute the derived m/p/vco/dot values for a candidate divisor set,
 * delegating to pineview_clock() on Pineview hardware.
 */
static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
{
	if (IS_PINEVIEW(dev)) {
		pineview_clock(refclk, clock);
		return;
	}

	/* Register fields encode value - 2 for n/m1/m2 (see limits comment). */
	clock->p = clock->p1 * clock->p2;
	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
	clock->vco = refclk * clock->m / (clock->n + 2);
	clock->dot = clock->vco / clock->p;
}
480
 
481
/**
482
 * Returns whether any output on the specified pipe is of the specified type
483
 */
484
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	/* Walk every encoder; match only those currently bound to this crtc. */
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->base.crtc == crtc && encoder->type == type)
			return true;

	return false;
}
496
 
497
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
498
/**
499
 * Returns whether the given set of divisors are valid for a given refclk with
500
 * the given connectors.
501
 */
502
 
503
/*
 * Validate a candidate divisor set against the platform limit table;
 * each INTELPllInvalid() returns false immediately on the first violation.
 */
static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid ("p1 out of range\n");
	if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
		INTELPllInvalid ("p out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid ("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid ("m1 out of range\n");
	/* Pineview has no m1 divider, so the m1 > m2 rule does not apply. */
	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
		INTELPllInvalid ("m1 <= m2\n");
	if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
		INTELPllInvalid ("m out of range\n");
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid ("n out of range\n");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid ("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid ("dot out of range\n");

	return true;
}
531
 
532
/*
 * Exhaustive divisor search for pre-G4x PLLs: try every in-range
 * m1/m2/n/p1 combination and keep the valid one whose dot clock is
 * closest to @target.  Returns true if any valid configuration was found
 * (i.e. the initial error of @target was improved upon).
 */
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *best_clock)

{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (I915_READ(LVDS)) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel.  We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		/* Non-LVDS: choose p2 by the dot-clock threshold. */
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset (best_clock, 0, sizeof (*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 is always 0 in Pineview */
			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
594
 
595
/*
 * Divisor search for G4x-class PLLs.  Unlike intel_find_best_PLL() this
 * accepts any candidate within ~0.585% of @target, iterates the divisors
 * in hardware-preferred order (small n, large m1/m2/p1), and shrinks the
 * n search bound once a match is found so smaller n always wins.
 */
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		int lvds_reg;

		/* The live LVDS register lives behind the PCH on ILK+. */
		if (HAS_PCH_SPLIT(dev))
			lvds_reg = PCH_LVDS;
		else
			lvds_reg = LVDS;
		if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
658
 
659
/*
 * DisplayPort on Ironlake uses one of two fixed divisor sets, chosen by
 * whether the target clock is below 200MHz (cf. the two DP link rates
 * noted above intel_find_pll_g4x_dp); no search is performed.
 * Always succeeds.
 */
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;

	if (target < 200000) {
		clock.n = 1;
		clock.p1 = 2;
		clock.p2 = 10;
		clock.m1 = 12;
		clock.m2 = 9;
	} else {
		clock.n = 2;
		clock.p1 = 1;
		clock.p2 = 10;
		clock.m1 = 14;
		clock.m2 = 8;
	}
	/* Fill in the derived vco/dot values for the fixed divisors. */
	intel_clock(dev, refclk, &clock);
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}
683
 
684
/* DisplayPort has only two frequencies, 162MHz and 270MHz */
685
/*
 * Fixed divisor sets for G4x DisplayPort, chosen by whether the target
 * clock is below 200MHz; the derived values are computed inline against
 * a 96MHz reference (vco is left 0 — unused by callers of this path).
 * Always succeeds.
 */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *best_clock)
{
	intel_clock_t clock;
	if (target < 200000) {
		clock.p1 = 2;
		clock.p2 = 10;
		clock.n = 2;
		clock.m1 = 23;
		clock.m2 = 8;
	} else {
		clock.p1 = 1;
		clock.p2 = 10;
		clock.n = 1;
		clock.m1 = 14;
		clock.m2 = 2;
	}
	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
	clock.p = (clock.p1 * clock.p2);
	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
	clock.vco = 0;
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}
710
 
711
/**
712
 * intel_wait_for_vblank - wait for vblank on a given pipe
713
 * @dev: drm device
714
 * @pipe: pipe to wait for
715
 *
716
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
717
 * mode setting code.
718
 */
719
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipestat_reg = PIPESTAT(pipe);

	/* Clear existing vblank status. Note this will clear any other
	 * sticky status fields as well.
	 *
	 * This races with i915_driver_irq_handler() with the result
	 * that either function could miss a vblank event.  Here it is not
	 * fatal, as we will either wait upon the next vblank interrupt or
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
	 * called during modeset at which time the GPU should be idle and
	 * should *not* be performing page flips and thus not waiting on
	 * vblanks...
	 * Currently, the result of us stealing a vblank from the irq
	 * handler is that a single frame will be skipped during swapbuffers.
	 */
	I915_WRITE(pipestat_reg,
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

	/* Wait for vblank interrupt bit to set */
	/* NOTE(review): timeout argument 50 is presumably milliseconds per
	 * the i915 wait_for() convention -- confirm against the port's
	 * wait_for implementation. */
	if (wait_for(I915_READ(pipestat_reg) &
		     PIPE_VBLANK_INTERRUPT_STATUS,
		     50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}
746
 
747
/*
748
 * intel_wait_for_pipe_off - wait for pipe to turn off
749
 * @dev: drm device
750
 * @pipe: pipe to wait for
751
 *
752
 * After disabling a pipe, we can't wait for vblank in the usual way,
753
 * spinning on the vblank interrupt status bit, since we won't actually
754
 * see an interrupt when the pipe is disabled.
755
 *
756
 * On Gen4 and above:
757
 *   wait for the pipe register state bit to turn off
758
 *
759
 * Otherwise:
760
 *   wait for the display line value to settle (it usually
761
 *   ends up stopping at the start of the next frame).
762
 *
763
 */
764
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(pipe);

		/* Wait for the Pipe State to go off */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	} else {
		/* Pre-Gen4: no pipe-state bit; poll the scanline counter in
		 * PIPEDSL until it stops changing or 100ms elapses. */
		u32 last_line;
		int reg = PIPEDSL(pipe);
		unsigned long timeout = jiffies + msecs_to_jiffies(100);

		/* Wait for the display line to settle */
		do {
			last_line = I915_READ(reg) & DSL_LINEMASK;
			mdelay(5);
		} while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
			 time_after(timeout, jiffies));
		if (time_after(jiffies, timeout))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	}
}
790
 
791
/* Map a boolean enable flag to its display name for WARN messages. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";
	return "off";
}
795
 
796
/* Only for pre-ILK configs */
797
/* WARN if the DPLL for @pipe is not in the expected on/off @state. */
static void assert_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
811
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
812
#define assert_pll_disabled(d, p) assert_pll(d, p, false)
813
 
814
/* For ILK+ */
815
/* WARN if the PCH DPLL for @pipe is not in the expected on/off @state. */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
			   enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PCH PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
829
#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
830
#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
831
 
832
/* WARN if the FDI transmitter for @pipe is not in the expected @state. */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_TX_ENABLE);
	WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
846
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
847
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
848
 
849
/* WARN if the FDI receiver for @pipe is not in the expected @state. */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_RX_ENABLE);
	WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
863
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
864
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
865
 
866
/* WARN if the FDI TX PLL for @pipe is disabled (no-op on Gen5, where
 * the FDI PLL is always enabled). */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (dev_priv->info->gen == 5)
		return;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
880
 
881
/* WARN if the FDI RX PLL for @pipe is disabled. */
static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
}
891
 
892
/*
 * WARN if the panel-power-sequencer registers for the panel driven by
 * @pipe are still write-locked.  "Unlocked" means the panel is either
 * powered off or the unlock pattern is present in the PP control register.
 */
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	int pp_reg, lvds_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		pp_reg = PCH_PP_CONTROL;
		lvds_reg = PCH_LVDS;
	} else {
		pp_reg = PP_CONTROL;
		lvds_reg = LVDS;
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
		locked = false;

	/* Determine which pipe the panel is actually attached to. */
	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
		panel_pipe = PIPE_B;

	WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
920
 
921
/* WARN if @pipe is not in the expected enabled/disabled @state. */
static void assert_pipe(struct drm_i915_private *dev_priv,
			enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & PIPECONF_ENABLE);
	WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}
935
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
936
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
937
 
938
/* WARN if display @plane is not enabled. */
static void assert_plane_enabled(struct drm_i915_private *dev_priv,
				 enum plane plane)
{
	int reg;
	u32 val;

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	WARN(!(val & DISPLAY_PLANE_ENABLE),
	     "plane %c assertion failure, should be active but is disabled\n",
	     plane_name(plane));
}
950
 
951
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
952
				   enum pipe pipe)
953
{
954
	int reg, i;
955
	u32 val;
956
	int cur_pipe;
957
 
958
	/* Planes are fixed to pipes on ILK+ */
959
	if (HAS_PCH_SPLIT(dev_priv->dev))
960
		return;
961
 
962
	/* Need to check both planes against the pipe */
963
	for (i = 0; i < 2; i++) {
964
		reg = DSPCNTR(i);
965
		val = I915_READ(reg);
966
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
967
			DISPPLANE_SEL_PIPE_SHIFT;
968
		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
969
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
970
		     plane_name(i), pipe_name(pipe));
971
	}
972
}
973
 
974
static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
975
{
976
	u32 val;
977
	bool enabled;
978
 
979
	val = I915_READ(PCH_DREF_CONTROL);
980
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
981
			    DREF_SUPERSPREAD_SOURCE_MASK));
982
	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
983
}
984
 
985
static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
986
				       enum pipe pipe)
987
{
988
	int reg;
989
	u32 val;
990
	bool enabled;
991
 
992
	reg = TRANSCONF(pipe);
993
	val = I915_READ(reg);
994
	enabled = !!(val & TRANS_ENABLE);
995
	WARN(enabled,
996
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
997
	     pipe_name(pipe));
998
}
999
 
1000
/*
 * Return true if the DP port whose control value is @val is enabled
 * and routed to @pipe.  @port_sel identifies the port in the CPT
 * transcoder DP control register.
 */
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum pipe pipe, u32 port_sel, u32 val)
{
	if ((val & DP_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		/* On CPT the pipe<->port routing lives in TRANS_DP_CTL. */
		u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
		return (trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) == port_sel;
	}

	/* Pre-CPT: pipe select bits live in the port register itself. */
	return (val & DP_PIPE_MASK) == (pipe << 30);
}
1017
 
1018
/* Return true if the HDMI port with control value @val drives @pipe. */
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & PORT_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev))
		return (val & PORT_TRANS_SEL_MASK) == PORT_TRANS_SEL_CPT(pipe);

	return (val & TRANSCODER_MASK) == TRANSCODER(pipe);
}
1033
 
1034
/* Return true if the LVDS port with control value @val drives @pipe. */
static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & LVDS_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev))
		return (val & PORT_TRANS_SEL_MASK) == PORT_TRANS_SEL_CPT(pipe);

	return (val & LVDS_PIPE_MASK) == LVDS_PIPE(pipe);
}
1049
 
1050
/* Return true if the VGA DAC with control value @val drives @pipe. */
static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & ADPA_DAC_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev))
		return (val & PORT_TRANS_SEL_MASK) == PORT_TRANS_SEL_CPT(pipe);

	return (val & ADPA_PIPE_SELECT_MASK) == ADPA_PIPE_SELECT(pipe);
}
1064
 
1065
/* WARN if the PCH DP port at @reg is enabled on transcoder @pipe. */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, int reg, u32 port_sel)
{
	u32 port_ctl = I915_READ(reg);

	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, port_ctl),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));
}
1073
 
1074
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1075
				     enum pipe pipe, int reg)
1076
{
1077
	u32 val = I915_READ(reg);
1078
	WARN(hdmi_pipe_enabled(dev_priv, val, pipe),
1079
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1080
	     reg, pipe_name(pipe));
1081
}
1082
 
1083
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1084
				      enum pipe pipe)
1085
{
1086
	int reg;
1087
	u32 val;
1088
 
1089
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1090
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1091
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1092
 
1093
	reg = PCH_ADPA;
1094
	val = I915_READ(reg);
1095
	WARN(adpa_pipe_enabled(dev_priv, val, pipe),
1096
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
1097
	     pipe_name(pipe));
1098
 
1099
	reg = PCH_LVDS;
1100
	val = I915_READ(reg);
1101
	WARN(lvds_pipe_enabled(dev_priv, val, pipe),
1102
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
1103
	     pipe_name(pipe));
1104
 
1105
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
1106
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
1107
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
1108
}
1109
 
1110
/**
1111
 * intel_enable_pll - enable a PLL
1112
 * @dev_priv: i915 private structure
1113
 * @pipe: pipe PLL to enable
1114
 *
1115
 * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
1116
 * make sure the PLL reg is writable first though, since the panel write
1117
 * protect mechanism may be enabled.
1118
 *
1119
 * Note!  This is for pre-ILK only.
1120
 */
1121
static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1122
{
1123
    int reg;
1124
    u32 val;
1125
 
1126
    /* No really, not for ILK+ */
1127
    BUG_ON(dev_priv->info->gen >= 5);
1128
 
1129
    /* PLL is protected by panel, make sure we can write it */
1130
    if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
1131
        assert_panel_unlocked(dev_priv, pipe);
1132
 
1133
    reg = DPLL(pipe);
1134
    val = I915_READ(reg);
1135
    val |= DPLL_VCO_ENABLE;
1136
 
1137
    /* We do this three times for luck */
1138
    I915_WRITE(reg, val);
1139
    POSTING_READ(reg);
1140
    udelay(150); /* wait for warmup */
1141
    I915_WRITE(reg, val);
1142
    POSTING_READ(reg);
1143
    udelay(150); /* wait for warmup */
1144
    I915_WRITE(reg, val);
1145
    POSTING_READ(reg);
1146
    udelay(150); /* wait for warmup */
1147
}
1148
 
1149
/**
1150
 * intel_disable_pll - disable a PLL
1151
 * @dev_priv: i915 private structure
1152
 * @pipe: pipe PLL to disable
1153
 *
1154
 * Disable the PLL for @pipe, making sure the pipe is off first.
1155
 *
1156
 * Note!  This is for pre-ILK only.
1157
 */
1158
static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1159
{
1160
	int reg;
1161
	u32 val;
1162
 
1163
	/* Don't disable pipe A or pipe A PLLs if needed */
1164
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1165
		return;
1166
 
1167
	/* Make sure the pipe isn't still relying on us */
1168
	assert_pipe_disabled(dev_priv, pipe);
1169
 
1170
	reg = DPLL(pipe);
1171
	val = I915_READ(reg);
1172
	val &= ~DPLL_VCO_ENABLE;
1173
	I915_WRITE(reg, val);
1174
	POSTING_READ(reg);
1175
}
1176
 
1177
/**
1178
 * intel_enable_pch_pll - enable PCH PLL
1179
 * @dev_priv: i915 private structure
1180
 * @pipe: pipe PLL to enable
1181
 *
1182
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
1183
 * drives the transcoder clock.
1184
 */
1185
static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
1186
				 enum pipe pipe)
1187
{
1188
	int reg;
1189
	u32 val;
1190
 
1191
	/* PCH only available on ILK+ */
1192
	BUG_ON(dev_priv->info->gen < 5);
1193
 
1194
	/* PCH refclock must be enabled first */
1195
	assert_pch_refclk_enabled(dev_priv);
1196
 
1197
	reg = PCH_DPLL(pipe);
1198
	val = I915_READ(reg);
1199
	val |= DPLL_VCO_ENABLE;
1200
	I915_WRITE(reg, val);
1201
	POSTING_READ(reg);
1202
	udelay(200);
1203
}
1204
 
1205
static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
1206
				  enum pipe pipe)
1207
{
1208
	int reg;
1209
	u32 val;
1210
 
1211
	/* PCH only available on ILK+ */
1212
	BUG_ON(dev_priv->info->gen < 5);
1213
 
1214
	/* Make sure transcoder isn't still depending on us */
1215
	assert_transcoder_disabled(dev_priv, pipe);
1216
 
1217
	reg = PCH_DPLL(pipe);
1218
	val = I915_READ(reg);
1219
	val &= ~DPLL_VCO_ENABLE;
1220
	I915_WRITE(reg, val);
1221
	POSTING_READ(reg);
1222
	udelay(200);
1223
}
1224
 
1225
static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1226
				    enum pipe pipe)
1227
{
1228
	int reg;
1229
	u32 val;
1230
 
1231
	/* PCH only available on ILK+ */
1232
	BUG_ON(dev_priv->info->gen < 5);
1233
 
1234
	/* Make sure PCH DPLL is enabled */
1235
	assert_pch_pll_enabled(dev_priv, pipe);
1236
 
1237
	/* FDI must be feeding us bits for PCH ports */
1238
	assert_fdi_tx_enabled(dev_priv, pipe);
1239
	assert_fdi_rx_enabled(dev_priv, pipe);
1240
 
1241
	reg = TRANSCONF(pipe);
1242
	val = I915_READ(reg);
1243
 
1244
	if (HAS_PCH_IBX(dev_priv->dev)) {
1245
		/*
1246
		 * make the BPC in transcoder be consistent with
1247
		 * that in pipeconf reg.
1248
		 */
1249
		val &= ~PIPE_BPC_MASK;
1250
		val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
1251
	}
1252
	I915_WRITE(reg, val | TRANS_ENABLE);
1253
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1254
		DRM_ERROR("failed to enable transcoder %d\n", pipe);
1255
}
1256
 
1257
static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
1258
				     enum pipe pipe)
1259
{
1260
	int reg;
1261
	u32 val;
1262
 
1263
	/* FDI relies on the transcoder */
1264
	assert_fdi_tx_disabled(dev_priv, pipe);
1265
	assert_fdi_rx_disabled(dev_priv, pipe);
1266
 
1267
	/* Ports must be off as well */
1268
	assert_pch_ports_disabled(dev_priv, pipe);
1269
 
1270
	reg = TRANSCONF(pipe);
1271
	val = I915_READ(reg);
1272
	val &= ~TRANS_ENABLE;
1273
	I915_WRITE(reg, val);
1274
	/* wait for PCH transcoder off, transcoder state */
1275
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1276
		DRM_ERROR("failed to disable transcoder\n");
1277
}
1278
 
1279
/**
1280
 * intel_enable_pipe - enable a pipe, asserting requirements
1281
 * @dev_priv: i915 private structure
1282
 * @pipe: pipe to enable
1283
 * @pch_port: on ILK+, is this pipe driving a PCH port or not
1284
 *
1285
 * Enable @pipe, making sure that various hardware specific requirements
1286
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1287
 *
1288
 * @pipe should be %PIPE_A or %PIPE_B.
1289
 *
1290
 * Will wait until the pipe is actually running (i.e. first vblank) before
1291
 * returning.
1292
 */
1293
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1294
			      bool pch_port)
1295
{
1296
	int reg;
1297
	u32 val;
1298
 
1299
	/*
1300
	 * A pipe without a PLL won't actually be able to drive bits from
1301
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1302
	 * need the check.
1303
	 */
1304
	if (!HAS_PCH_SPLIT(dev_priv->dev))
1305
		assert_pll_enabled(dev_priv, pipe);
1306
	else {
1307
		if (pch_port) {
1308
			/* if driving the PCH, we need FDI enabled */
1309
			assert_fdi_rx_pll_enabled(dev_priv, pipe);
1310
			assert_fdi_tx_pll_enabled(dev_priv, pipe);
1311
		}
1312
		/* FIXME: assert CPU port conditions for SNB+ */
1313
	}
1314
 
1315
	reg = PIPECONF(pipe);
1316
	val = I915_READ(reg);
1317
	if (val & PIPECONF_ENABLE)
1318
		return;
1319
 
1320
	I915_WRITE(reg, val | PIPECONF_ENABLE);
1321
	intel_wait_for_vblank(dev_priv->dev, pipe);
1322
}
1323
 
1324
/**
1325
 * intel_disable_pipe - disable a pipe, asserting requirements
1326
 * @dev_priv: i915 private structure
1327
 * @pipe: pipe to disable
1328
 *
1329
 * Disable @pipe, making sure that various hardware specific requirements
1330
 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1331
 *
1332
 * @pipe should be %PIPE_A or %PIPE_B.
1333
 *
1334
 * Will wait until the pipe has shut down before returning.
1335
 */
1336
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1337
			       enum pipe pipe)
1338
{
1339
	int reg;
1340
	u32 val;
1341
 
1342
	/*
1343
	 * Make sure planes won't keep trying to pump pixels to us,
1344
	 * or we might hang the display.
1345
	 */
1346
	assert_planes_disabled(dev_priv, pipe);
1347
 
1348
	/* Don't disable pipe A or pipe A PLLs if needed */
1349
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1350
		return;
1351
 
1352
	reg = PIPECONF(pipe);
1353
	val = I915_READ(reg);
1354
	if ((val & PIPECONF_ENABLE) == 0)
1355
		return;
1356
 
1357
	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
1358
	intel_wait_for_pipe_off(dev_priv->dev, pipe);
1359
}
1360
 
1361
/*
1362
 * Plane regs are double buffered, going from enabled->disabled needs a
1363
 * trigger in order to latch.  The display address reg provides this.
1364
 */
1365
static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
1366
				      enum plane plane)
1367
{
1368
	I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1369
	I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1370
}
1371
 
1372
/**
1373
 * intel_enable_plane - enable a display plane on a given pipe
1374
 * @dev_priv: i915 private structure
1375
 * @plane: plane to enable
1376
 * @pipe: pipe being fed
1377
 *
1378
 * Enable @plane on @pipe, making sure that @pipe is running first.
1379
 */
1380
static void intel_enable_plane(struct drm_i915_private *dev_priv,
1381
			       enum plane plane, enum pipe pipe)
1382
{
1383
	int reg;
1384
	u32 val;
1385
 
1386
	/* If the pipe isn't enabled, we can't pump pixels and may hang */
1387
	assert_pipe_enabled(dev_priv, pipe);
1388
 
1389
	reg = DSPCNTR(plane);
1390
	val = I915_READ(reg);
1391
	if (val & DISPLAY_PLANE_ENABLE)
1392
		return;
1393
 
1394
	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
1395
	intel_flush_display_plane(dev_priv, plane);
1396
	intel_wait_for_vblank(dev_priv->dev, pipe);
1397
}
1398
 
1399
/**
1400
 * intel_disable_plane - disable a display plane
1401
 * @dev_priv: i915 private structure
1402
 * @plane: plane to disable
1403
 * @pipe: pipe consuming the data
1404
 *
1405
 * Disable @plane; should be an independent operation.
1406
 */
1407
static void intel_disable_plane(struct drm_i915_private *dev_priv,
1408
				enum plane plane, enum pipe pipe)
1409
{
1410
	int reg;
1411
	u32 val;
1412
 
1413
	reg = DSPCNTR(plane);
1414
	val = I915_READ(reg);
1415
	if ((val & DISPLAY_PLANE_ENABLE) == 0)
1416
		return;
1417
 
1418
	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
1419
	intel_flush_display_plane(dev_priv, plane);
1420
	intel_wait_for_vblank(dev_priv->dev, pipe);
1421
}
1422
 
1423
/* Turn off the PCH DP port at @reg if it is routed to @pipe. */
static void disable_pch_dp(struct drm_i915_private *dev_priv,
			   enum pipe pipe, int reg, u32 port_sel)
{
	u32 port_ctl = I915_READ(reg);

	if (dp_pipe_enabled(dev_priv, pipe, port_sel, port_ctl)) {
		DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
		I915_WRITE(reg, port_ctl & ~DP_PORT_EN);
	}
}
1432
 
1433
static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1434
			     enum pipe pipe, int reg)
1435
{
1436
	u32 val = I915_READ(reg);
1437
	if (hdmi_pipe_enabled(dev_priv, val, pipe)) {
1438
		DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
1439
			      reg, pipe);
1440
		I915_WRITE(reg, val & ~PORT_ENABLE);
1441
	}
1442
}
1443
 
1444
/* Disable any ports connected to this transcoder */
1445
static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1446
				    enum pipe pipe)
1447
{
1448
	u32 reg, val;
1449
 
1450
	val = I915_READ(PCH_PP_CONTROL);
1451
	I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
1452
 
1453
	disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1454
	disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1455
	disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1456
 
1457
	reg = PCH_ADPA;
1458
	val = I915_READ(reg);
1459
	if (adpa_pipe_enabled(dev_priv, val, pipe))
1460
		I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
1461
 
1462
	reg = PCH_LVDS;
1463
	val = I915_READ(reg);
1464
	if (lvds_pipe_enabled(dev_priv, val, pipe)) {
1465
		DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
1466
		I915_WRITE(reg, val & ~LVDS_PORT_EN);
1467
		POSTING_READ(reg);
1468
		udelay(100);
1469
	}
1470
 
1471
	disable_pch_hdmi(dev_priv, pipe, HDMIB);
1472
	disable_pch_hdmi(dev_priv, pipe, HDMIC);
1473
	disable_pch_hdmi(dev_priv, pipe, HDMID);
1474
}
1475
 
1476
static void i8xx_disable_fbc(struct drm_device *dev)
1477
{
1478
    struct drm_i915_private *dev_priv = dev->dev_private;
1479
    u32 fbc_ctl;
1480
 
1481
    /* Disable compression */
1482
    fbc_ctl = I915_READ(FBC_CONTROL);
1483
    if ((fbc_ctl & FBC_CTL_EN) == 0)
1484
        return;
1485
 
1486
    fbc_ctl &= ~FBC_CTL_EN;
1487
    I915_WRITE(FBC_CONTROL, fbc_ctl);
1488
 
1489
    /* Wait for compressing bit to clear */
1490
    if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
1491
        DRM_DEBUG_KMS("FBC idle timed out\n");
1492
        return;
1493
    }
1494
 
1495
    DRM_DEBUG_KMS("disabled FBC\n");
1496
}
1497
 
1498
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1499
{
1500
    struct drm_device *dev = crtc->dev;
1501
    struct drm_i915_private *dev_priv = dev->dev_private;
1502
    struct drm_framebuffer *fb = crtc->fb;
1503
    struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1504
    struct drm_i915_gem_object *obj = intel_fb->obj;
1505
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1506
    int cfb_pitch;
1507
    int plane, i;
1508
    u32 fbc_ctl, fbc_ctl2;
1509
 
1510
    cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
1511
    if (fb->pitch < cfb_pitch)
1512
        cfb_pitch = fb->pitch;
1513
 
1514
    /* FBC_CTL wants 64B units */
1515
    cfb_pitch = (cfb_pitch / 64) - 1;
1516
    plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
1517
 
1518
    /* Clear old tags */
1519
    for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
1520
        I915_WRITE(FBC_TAG + (i * 4), 0);
1521
 
1522
    /* Set it up... */
1523
    fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
1524
    fbc_ctl2 |= plane;
1525
    I915_WRITE(FBC_CONTROL2, fbc_ctl2);
1526
    I915_WRITE(FBC_FENCE_OFF, crtc->y);
1527
 
1528
    /* enable it... */
1529
    fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
1530
    if (IS_I945GM(dev))
1531
        fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
1532
    fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
1533
    fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
1534
    fbc_ctl |= obj->fence_reg;
1535
    I915_WRITE(FBC_CONTROL, fbc_ctl);
1536
 
1537
    DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
1538
              cfb_pitch, crtc->y, intel_crtc->plane);
1539
}
1540
 
1541
static bool i8xx_fbc_enabled(struct drm_device *dev)
1542
{
1543
    struct drm_i915_private *dev_priv = dev->dev_private;
1544
 
1545
    return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
1546
}
1547
 
1548
static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1549
{
1550
    struct drm_device *dev = crtc->dev;
1551
    struct drm_i915_private *dev_priv = dev->dev_private;
1552
    struct drm_framebuffer *fb = crtc->fb;
1553
    struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1554
    struct drm_i915_gem_object *obj = intel_fb->obj;
1555
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1556
    int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1557
    unsigned long stall_watermark = 200;
1558
    u32 dpfc_ctl;
1559
 
1560
    dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
1561
    dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
1562
    I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
1563
 
1564
    I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1565
           (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1566
           (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1567
    I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
1568
 
1569
    /* enable it... */
1570
    I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
1571
 
1572
    DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1573
}
1574
 
1575
static void g4x_disable_fbc(struct drm_device *dev)
1576
{
1577
    struct drm_i915_private *dev_priv = dev->dev_private;
1578
    u32 dpfc_ctl;
1579
 
1580
    /* Disable compression */
1581
    dpfc_ctl = I915_READ(DPFC_CONTROL);
1582
    if (dpfc_ctl & DPFC_CTL_EN) {
1583
        dpfc_ctl &= ~DPFC_CTL_EN;
1584
        I915_WRITE(DPFC_CONTROL, dpfc_ctl);
1585
 
1586
        DRM_DEBUG_KMS("disabled FBC\n");
1587
    }
1588
}
1589
 
1590
static bool g4x_fbc_enabled(struct drm_device *dev)
1591
{
1592
    struct drm_i915_private *dev_priv = dev->dev_private;
1593
 
1594
    return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
1595
}
1596
 
1597
static void sandybridge_blit_fbc_update(struct drm_device *dev)
1598
{
1599
	struct drm_i915_private *dev_priv = dev->dev_private;
1600
	u32 blt_ecoskpd;
1601
 
1602
	/* Make sure blitter notifies FBC of writes */
1603
	gen6_gt_force_wake_get(dev_priv);
1604
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
1605
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
1606
		GEN6_BLITTER_LOCK_SHIFT;
1607
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1608
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
1609
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1610
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
1611
			 GEN6_BLITTER_LOCK_SHIFT);
1612
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1613
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
1614
	gen6_gt_force_wake_put(dev_priv);
1615
}
1616
 
1617
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1618
{
1619
    struct drm_device *dev = crtc->dev;
1620
    struct drm_i915_private *dev_priv = dev->dev_private;
1621
    struct drm_framebuffer *fb = crtc->fb;
1622
    struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1623
    struct drm_i915_gem_object *obj = intel_fb->obj;
1624
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1625
    int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1626
    unsigned long stall_watermark = 200;
1627
    u32 dpfc_ctl;
1628
 
1629
    dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1630
    dpfc_ctl &= DPFC_RESERVED;
1631
    dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
1632
    /* Set persistent mode for front-buffer rendering, ala X. */
1633
    dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
1634
    dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
1635
    I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
1636
 
1637
    I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1638
           (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1639
           (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1640
    I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
1641
    I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
1642
    /* enable it... */
1643
    I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
1644
 
1645
    if (IS_GEN6(dev)) {
1646
        I915_WRITE(SNB_DPFC_CTL_SA,
1647
               SNB_CPU_FENCE_ENABLE | obj->fence_reg);
1648
        I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
1649
        sandybridge_blit_fbc_update(dev);
1650
    }
1651
 
1652
    DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1653
}
1654
 
1655
static void ironlake_disable_fbc(struct drm_device *dev)
1656
{
1657
    struct drm_i915_private *dev_priv = dev->dev_private;
1658
    u32 dpfc_ctl;
1659
 
1660
    /* Disable compression */
1661
    dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1662
    if (dpfc_ctl & DPFC_CTL_EN) {
1663
        dpfc_ctl &= ~DPFC_CTL_EN;
1664
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
1665
 
1666
        DRM_DEBUG_KMS("disabled FBC\n");
1667
    }
1668
}
1669
 
1670
static bool ironlake_fbc_enabled(struct drm_device *dev)
1671
{
1672
    struct drm_i915_private *dev_priv = dev->dev_private;
1673
 
1674
    return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
1675
}
1676
 
1677
bool intel_fbc_enabled(struct drm_device *dev)
1678
{
1679
	struct drm_i915_private *dev_priv = dev->dev_private;
1680
 
1681
	if (!dev_priv->display.fbc_enabled)
1682
		return false;
1683
 
1684
	return dev_priv->display.fbc_enabled(dev);
1685
}
1686
 
1687
 
1688
 
1689
 
1690
 
1691
 
1692
 
1693
 
1694
 
1695
 
1696
static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1697
{
1698
	struct intel_fbc_work *work;
1699
	struct drm_device *dev = crtc->dev;
1700
	struct drm_i915_private *dev_priv = dev->dev_private;
1701
 
1702
	if (!dev_priv->display.enable_fbc)
1703
		return;
1704
 
1705
//	intel_cancel_fbc_work(dev_priv);
1706
 
1707
//	work = kzalloc(sizeof *work, GFP_KERNEL);
1708
//	if (work == NULL) {
1709
//		dev_priv->display.enable_fbc(crtc, interval);
1710
//		return;
1711
//	}
1712
 
1713
//	work->crtc = crtc;
1714
//	work->fb = crtc->fb;
1715
//	work->interval = interval;
1716
//	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
1717
 
1718
//	dev_priv->fbc_work = work;
1719
 
1720
	DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
1721
 
1722
	/* Delay the actual enabling to let pageflipping cease and the
1723
	 * display to settle before starting the compression. Note that
1724
	 * this delay also serves a second purpose: it allows for a
1725
	 * vblank to pass after disabling the FBC before we attempt
1726
	 * to modify the control registers.
1727
	 *
1728
	 * A more complicated solution would involve tracking vblanks
1729
	 * following the termination of the page-flipping sequence
1730
	 * and indeed performing the enable as a co-routine and not
1731
	 * waiting synchronously upon the vblank.
1732
	 */
1733
//	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
1734
}
1735
 
1736
void intel_disable_fbc(struct drm_device *dev)
1737
{
1738
	struct drm_i915_private *dev_priv = dev->dev_private;
1739
 
1740
//   intel_cancel_fbc_work(dev_priv);
1741
 
1742
	if (!dev_priv->display.disable_fbc)
1743
		return;
1744
 
1745
	dev_priv->display.disable_fbc(dev);
1746
	dev_priv->cfb_plane = -1;
1747
}
1748
 
1749
/**
1750
 * intel_update_fbc - enable/disable FBC as needed
1751
 * @dev: the drm_device
1752
 *
1753
 * Set up the framebuffer compression hardware at mode set time.  We
1754
 * enable it if possible:
1755
 *   - plane A only (on pre-965)
1756
 *   - no pixel mulitply/line duplication
1757
 *   - no alpha buffer discard
1758
 *   - no dual wide
1759
 *   - framebuffer <= 2048 in width, 1536 in height
1760
 *
1761
 * We can't assume that any compression will take place (worst case),
1762
 * so the compressed buffer has to be the same size as the uncompressed
1763
 * one.  It also must reside (along with the line length buffer) in
1764
 * stolen memory.
1765
 *
1766
 * We need to enable/disable FBC on a global basis.
1767
 */
1768
static void intel_update_fbc(struct drm_device *dev)
1769
{
1770
	struct drm_i915_private *dev_priv = dev->dev_private;
1771
	struct drm_crtc *crtc = NULL, *tmp_crtc;
1772
	struct intel_crtc *intel_crtc;
1773
	struct drm_framebuffer *fb;
1774
	struct intel_framebuffer *intel_fb;
1775
	struct drm_i915_gem_object *obj;
1776
 
1777
	DRM_DEBUG_KMS("\n");
1778
 
1779
	if (!i915_powersave)
1780
		return;
1781
 
1782
	if (!I915_HAS_FBC(dev))
1783
		return;
1784
 
1785
	/*
1786
	 * If FBC is already on, we just have to verify that we can
1787
	 * keep it that way...
1788
	 * Need to disable if:
1789
	 *   - more than one pipe is active
1790
	 *   - changing FBC params (stride, fence, mode)
1791
	 *   - new fb is too large to fit in compressed buffer
1792
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
1793
	 */
1794
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
1795
		if (tmp_crtc->enabled && tmp_crtc->fb) {
1796
			if (crtc) {
1797
				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
1798
//				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
1799
				goto out_disable;
1800
			}
1801
			crtc = tmp_crtc;
1802
		}
1803
	}
1804
 
1805
	if (!crtc || crtc->fb == NULL) {
1806
		DRM_DEBUG_KMS("no output, disabling\n");
1807
//		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
1808
		goto out_disable;
1809
	}
1810
 
1811
	intel_crtc = to_intel_crtc(crtc);
1812
	fb = crtc->fb;
1813
	intel_fb = to_intel_framebuffer(fb);
1814
	obj = intel_fb->obj;
1815
 
1816
	if (!i915_enable_fbc) {
1817
		DRM_DEBUG_KMS("fbc disabled per module param (default off)\n");
1818
//		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
1819
		goto out_disable;
1820
	}
1821
	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
1822
		DRM_DEBUG_KMS("framebuffer too large, disabling "
1823
			      "compression\n");
1824
//		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1825
		goto out_disable;
1826
	}
1827
	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
1828
	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
1829
		DRM_DEBUG_KMS("mode incompatible with compression, "
1830
			      "disabling\n");
1831
//		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
1832
		goto out_disable;
1833
	}
1834
	if ((crtc->mode.hdisplay > 2048) ||
1835
	    (crtc->mode.vdisplay > 1536)) {
1836
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
1837
//		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
1838
		goto out_disable;
1839
	}
1840
	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
1841
		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
1842
//		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
1843
		goto out_disable;
1844
	}
1845
 
1846
	/* The use of a CPU fence is mandatory in order to detect writes
1847
	 * by the CPU to the scanout and trigger updates to the FBC.
1848
	 */
1849
//	if (obj->tiling_mode != I915_TILING_X ||
1850
//	    obj->fence_reg == I915_FENCE_REG_NONE) {
1851
//		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
1852
//		dev_priv->no_fbc_reason = FBC_NOT_TILED;
1853
//		goto out_disable;
1854
//	}
1855
 
1856
	/* If the kernel debugger is active, always disable compression */
1857
	if (in_dbg_master())
1858
		goto out_disable;
1859
 
1860
	/* If the scanout has not changed, don't modify the FBC settings.
1861
	 * Note that we make the fundamental assumption that the fb->obj
1862
	 * cannot be unpinned (and have its GTT offset and fence revoked)
1863
	 * without first being decoupled from the scanout and FBC disabled.
1864
	 */
1865
	if (dev_priv->cfb_plane == intel_crtc->plane &&
1866
	    dev_priv->cfb_fb == fb->base.id &&
1867
	    dev_priv->cfb_y == crtc->y)
1868
		return;
1869
 
1870
	if (intel_fbc_enabled(dev)) {
1871
		/* We update FBC along two paths, after changing fb/crtc
1872
		 * configuration (modeswitching) and after page-flipping
1873
		 * finishes. For the latter, we know that not only did
1874
		 * we disable the FBC at the start of the page-flip
1875
		 * sequence, but also more than one vblank has passed.
1876
		 *
1877
		 * For the former case of modeswitching, it is possible
1878
		 * to switch between two FBC valid configurations
1879
		 * instantaneously so we do need to disable the FBC
1880
		 * before we can modify its control registers. We also
1881
		 * have to wait for the next vblank for that to take
1882
		 * effect. However, since we delay enabling FBC we can
1883
		 * assume that a vblank has passed since disabling and
1884
		 * that we can safely alter the registers in the deferred
1885
		 * callback.
1886
		 *
1887
		 * In the scenario that we go from a valid to invalid
1888
		 * and then back to valid FBC configuration we have
1889
		 * no strict enforcement that a vblank occurred since
1890
		 * disabling the FBC. However, along all current pipe
1891
		 * disabling paths we do need to wait for a vblank at
1892
		 * some point. And we wait before enabling FBC anyway.
1893
		 */
1894
		DRM_DEBUG_KMS("disabling active FBC for update\n");
1895
		intel_disable_fbc(dev);
1896
	}
1897
 
1898
	intel_enable_fbc(crtc, 500);
1899
	return;
1900
 
1901
out_disable:
1902
	/* Multiple disables should be harmless */
1903
	if (intel_fbc_enabled(dev)) {
1904
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
1905
		intel_disable_fbc(dev);
1906
	}
1907
}
1908
 
1909
 
1910
 
1911
 
1912
 
1913
 
1914
 
1915
 
1916
 
1917
 
1918
 
1919
 
1920
 
1921
 
1922
 
1923
 
1924
 
1925
 
1926
 
1927
 
1928
 
1929
 
1930
 
1931
 
1932
/* Program the primary display plane for pre-Ironlake (i9xx) hardware.
 *
 * Derives the pixel format from @fb, computes the scanout base and the
 * (x, y) panning offset, and writes the plane control, stride and
 * address registers.  Register write order matters: DSPCNTR is updated
 * before the surface address so the new format takes effect with the
 * new base.
 *
 * Returns 0 on success, -EINVAL for an unsupported plane index or an
 * unknown pixel format.
 */
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                 int x, int y)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    struct intel_framebuffer *intel_fb;
    struct drm_i915_gem_object *obj;
    int plane = intel_crtc->plane;
    unsigned long Start, Offset;
    u32 dspcntr;
    u32 reg;

    /* Only primary planes A (0) and B (1) are handled here. */
    switch (plane) {
    case 0:
    case 1:
        break;
    default:
        DRM_ERROR("Can't update plane %d in SAREA\n", plane);
        return -EINVAL;
    }

    intel_fb = to_intel_framebuffer(fb);
    obj = intel_fb->obj;

    reg = DSPCNTR(plane);
    dspcntr = I915_READ(reg);
    /* Mask out pixel format bits in case we change it */
    dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
    switch (fb->bits_per_pixel) {
    case 8:
        dspcntr |= DISPPLANE_8BPP;
        break;
    case 16:
        /* depth 15 selects x1r5g5b5, otherwise r5g6b5 */
        if (fb->depth == 15)
            dspcntr |= DISPPLANE_15_16BPP;
        else
            dspcntr |= DISPPLANE_16BPP;
        break;
    case 24:
    case 32:
        dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
        break;
    default:
        DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
        return -EINVAL;
    }
    /* Gen4+ planes can scan out tiled buffers directly. */
    if (INTEL_INFO(dev)->gen >= 4) {
        if (obj->tiling_mode != I915_TILING_NONE)
            dspcntr |= DISPPLANE_TILED;
        else
            dspcntr &= ~DISPPLANE_TILED;
    }

    I915_WRITE(reg, dspcntr);

    Start = obj->gtt_offset;
    Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);

    DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
              Start, Offset, x, y, fb->pitch);
    I915_WRITE(DSPSTRIDE(plane), fb->pitch);
    if (INTEL_INFO(dev)->gen >= 4) {
        /* Gen4+ takes a surface base plus a separate tile/linear
         * offset; older parts take a single combined address. */
        I915_WRITE(DSPSURF(plane), Start);
        I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
        I915_WRITE(DSPADDR(plane), Offset);
    } else
        I915_WRITE(DSPADDR(plane), Start + Offset);
    POSTING_READ(reg);

    return 0;
}
2004
 
2005
static int ironlake_update_plane(struct drm_crtc *crtc,
2006
                 struct drm_framebuffer *fb, int x, int y)
2007
{
2008
    struct drm_device *dev = crtc->dev;
2009
    struct drm_i915_private *dev_priv = dev->dev_private;
2010
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2011
    struct intel_framebuffer *intel_fb;
2012
    struct drm_i915_gem_object *obj;
2013
    int plane = intel_crtc->plane;
2014
    unsigned long Start, Offset;
2015
    u32 dspcntr;
2016
    u32 reg;
2017
 
2018
    switch (plane) {
2019
    case 0:
2020
    case 1:
2021
        break;
2022
    default:
2023
        DRM_ERROR("Can't update plane %d in SAREA\n", plane);
2024
        return -EINVAL;
2025
    }
2026
 
2027
    intel_fb = to_intel_framebuffer(fb);
2028
    obj = intel_fb->obj;
2029
 
2030
    reg = DSPCNTR(plane);
2031
    dspcntr = I915_READ(reg);
2032
    /* Mask out pixel format bits in case we change it */
2033
    dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2034
    switch (fb->bits_per_pixel) {
2035
    case 8:
2036
        dspcntr |= DISPPLANE_8BPP;
2037
        break;
2038
    case 16:
2039
        if (fb->depth != 16)
2040
            return -EINVAL;
2041
 
2042
        dspcntr |= DISPPLANE_16BPP;
2043
        break;
2044
    case 24:
2045
    case 32:
2046
        if (fb->depth == 24)
2047
            dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
2048
        else if (fb->depth == 30)
2049
            dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
2050
        else
2051
            return -EINVAL;
2052
        break;
2053
    default:
2054
        DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
2055
        return -EINVAL;
2056
    }
2057
 
2058
//    if (obj->tiling_mode != I915_TILING_NONE)
2059
//        dspcntr |= DISPPLANE_TILED;
2060
//    else
2061
        dspcntr &= ~DISPPLANE_TILED;
2062
 
2063
    /* must disable */
2064
    dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2065
 
2066
    I915_WRITE(reg, dspcntr);
2067
 
2068
//    Start = obj->gtt_offset;
2069
//    Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
2070
 
2071
    DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2072
              Start, Offset, x, y, fb->pitch);
2073
//    I915_WRITE(DSPSTRIDE(plane), fb->pitch);
2074
//    I915_WRITE(DSPSURF(plane), Start);
2075
//    I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2076
//    I915_WRITE(DSPADDR(plane), Offset);
2077
//    POSTING_READ(reg);
2078
 
2079
    return 0;
2080
}
2081
 
2082
/* Assume fb object is pinned & idle & fenced and just update base pointers */
2083
static int
2084
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2085
			   int x, int y, enum mode_set_atomic state)
2086
{
2087
	struct drm_device *dev = crtc->dev;
2088
	struct drm_i915_private *dev_priv = dev->dev_private;
2089
	int ret;
2090
 
2091
	ret = dev_priv->display.update_plane(crtc, fb, x, y);
2092
	if (ret)
2093
		return ret;
2094
 
2095
	intel_update_fbc(dev);
2096
	intel_increase_pllclock(crtc);
2097
 
2098
	return 0;
2099
}
2100
 
2101
static int
2102
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2103
		    struct drm_framebuffer *old_fb)
2104
{
2105
	struct drm_device *dev = crtc->dev;
2106
	struct drm_i915_master_private *master_priv;
2107
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2108
	int ret;
2109
 
2110
	/* no fb bound */
2111
	if (!crtc->fb) {
2112
		DRM_ERROR("No FB bound\n");
2113
		return 0;
2114
	}
2115
 
2116
	switch (intel_crtc->plane) {
2117
	case 0:
2118
	case 1:
2119
		break;
2120
	default:
2121
		DRM_ERROR("no plane for crtc\n");
2122
		return -EINVAL;
2123
	}
2124
 
2125
	mutex_lock(&dev->struct_mutex);
2126
//   ret = intel_pin_and_fence_fb_obj(dev,
2127
//                    to_intel_framebuffer(crtc->fb)->obj,
2128
//                    NULL);
2129
	if (ret != 0) {
2130
		mutex_unlock(&dev->struct_mutex);
2131
		DRM_ERROR("pin & fence failed\n");
2132
		return ret;
2133
	}
2134
 
2135
	if (old_fb) {
2136
		struct drm_i915_private *dev_priv = dev->dev_private;
2137
		struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
2138
 
2139
//		wait_event(dev_priv->pending_flip_queue,
2140
//			   atomic_read(&dev_priv->mm.wedged) ||
2141
//			   atomic_read(&obj->pending_flip) == 0);
2142
 
2143
		/* Big Hammer, we also need to ensure that any pending
2144
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2145
		 * current scanout is retired before unpinning the old
2146
		 * framebuffer.
2147
		 *
2148
		 * This should only fail upon a hung GPU, in which case we
2149
		 * can safely continue.
2150
		 */
2151
//       ret = i915_gem_object_finish_gpu(obj);
2152
		(void) ret;
2153
	}
2154
 
2155
	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
2156
					 LEAVE_ATOMIC_MODE_SET);
2157
	if (ret) {
2158
//       i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
2159
		mutex_unlock(&dev->struct_mutex);
2160
		DRM_ERROR("failed to update base address\n");
2161
		return ret;
2162
	}
2163
 
2164
	if (old_fb) {
2165
//       intel_wait_for_vblank(dev, intel_crtc->pipe);
2166
//       i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
2167
	}
2168
 
2169
	mutex_unlock(&dev->struct_mutex);
2170
 
2171
//	if (!dev->primary->master)
2172
//		return 0;
2173
 
2174
//	master_priv = dev->primary->master->driver_priv;
2175
//	if (!master_priv->sarea_priv)
2176
//		return 0;
2177
 
2178
//	if (intel_crtc->pipe) {
2179
//		master_priv->sarea_priv->pipeB_x = x;
2180
//		master_priv->sarea_priv->pipeB_y = y;
2181
//	} else {
2182
//		master_priv->sarea_priv->pipeA_x = x;
2183
//		master_priv->sarea_priv->pipeA_y = y;
2184
//	}
2185
 
2186
	return 0;
2187
}
2188
 
2189
/* Select the eDP PLL frequency on the CPU DP_A port for the given link
 * @clock: below 200000 selects the 160MHz PLL (with the documented
 * register workaround sequence), otherwise the 270MHz PLL.
 * NOTE(review): only steps 1-3 of the listed workaround are written
 * here; step 4 (0x64000 bit 14) is not programmed in this function —
 * confirm whether the final DP_A write elsewhere covers it. */
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (clock < 200000) {
		u32 temp;
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		/* workaround for 160Mhz:
		   1) program 0x4600c bits 15:0 = 0x8124
		   2) program 0x46010 bit 0 = 1
		   3) program 0x46034 bit 24 = 1
		   4) program 0x64000 bit 14 = 1
		   */
		temp = I915_READ(0x4600c);
		temp &= 0xffff0000;
		I915_WRITE(0x4600c, temp | 0x8124);

		temp = I915_READ(0x46010);
		I915_WRITE(0x46010, temp | 1);

		temp = I915_READ(0x46034);
		I915_WRITE(0x46034, temp | (1 << 24));
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
	}
	I915_WRITE(DP_A, dpa_ctl);

	/* Flush the write and give the PLL time to settle. */
	POSTING_READ(DP_A);
	udelay(500);
}
2225
 
2226
/* Switch the FDI link out of training patterns into normal pixel
 * traffic on both the CPU TX side and the PCH RX side, enabling
 * enhanced framing.  Must be called after link training succeeded. */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		/* IVB uses a different bit layout for the train field. */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
2266
 
2267
/* Enable the FDI phase sync pointer for @pipe on CougarPoint PCH.
 * Two writes are required: the first sets the override bit to unlock
 * the field, the second additionally sets the enable bit. */
static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flags = I915_READ(SOUTH_CHICKEN1);

	flags |= FDI_PHASE_SYNC_OVR(pipe);
	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
	flags |= FDI_PHASE_SYNC_EN(pipe);
	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
	POSTING_READ(SOUTH_CHICKEN1);
}
2278
 
2279
/* The FDI link training functions for ILK/Ibexpeak.
 *
 * Runs the two-phase training sequence: pattern 1 until the RX reports
 * bit lock, then pattern 2 until it reports symbol lock, polling the
 * RX IIR up to 5 times per phase.  Training failures are logged but
 * not propagated — the caller proceeds regardless. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;
    u32 reg, temp, tries;

    /* FDI needs bits from pipe & plane first */
    assert_pipe_enabled(dev_priv, pipe);
    assert_plane_enabled(dev_priv, plane);

    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
       for train result */
    reg = FDI_RX_IMR(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_RX_SYMBOL_LOCK;
    temp &= ~FDI_RX_BIT_LOCK;
    I915_WRITE(reg, temp);
    I915_READ(reg);
    udelay(150);

    /* enable CPU FDI TX and PCH FDI RX */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    /* bits 21:19 hold the lane count minus one */
    temp &= ~(7 << 19);
    temp |= (intel_crtc->fdi_lanes - 1) << 19;
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_1;
    I915_WRITE(reg, temp | FDI_TX_ENABLE);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_1;
    I915_WRITE(reg, temp | FDI_RX_ENABLE);

    POSTING_READ(reg);
    udelay(150);

    /* Ironlake workaround, enable clock pointer after FDI enable*/
    if (HAS_PCH_IBX(dev)) {
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
               FDI_RX_PHASE_SYNC_POINTER_EN);
    }

    /* Poll for bit lock; writing the bit back acknowledges it. */
    reg = FDI_RX_IIR(pipe);
    for (tries = 0; tries < 5; tries++) {
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if ((temp & FDI_RX_BIT_LOCK)) {
            DRM_DEBUG_KMS("FDI train 1 done.\n");
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
            break;
        }
    }
    if (tries == 5)
        DRM_ERROR("FDI train 1 fail!\n");

    /* Train 2 */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_2;
    I915_WRITE(reg, temp);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_2;
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    udelay(150);

    /* Poll for symbol lock to complete training. */
    reg = FDI_RX_IIR(pipe);
    for (tries = 0; tries < 5; tries++) {
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if (temp & FDI_RX_SYMBOL_LOCK) {
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
            DRM_DEBUG_KMS("FDI train 2 done.\n");
            break;
        }
    }
    if (tries == 5)
        DRM_ERROR("FDI train 2 fail!\n");

    DRM_DEBUG_KMS("FDI train done\n");

}
2375
 
2376
/* FDI voltage-swing / pre-emphasis settings tried in order during
 * SNB-B link training (also reused by the IVB manual training path). */
static const int snb_b_fdi_train_param [] = {
    FDI_LINK_TRAIN_400MV_0DB_SNB_B,
    FDI_LINK_TRAIN_400MV_6DB_SNB_B,
    FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
    FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
2382
 
2383
/* The FDI link training functions for SNB/Cougarpoint.
 *
 * Same two-phase sequence as Ironlake (pattern 1 → bit lock,
 * pattern 2 → symbol lock), but each phase steps through the four
 * voltage/emphasis combinations in snb_b_fdi_train_param[] until the
 * receiver locks.  Failures are logged, not propagated. */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    u32 reg, temp, i;

    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
       for train result */
    reg = FDI_RX_IMR(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_RX_SYMBOL_LOCK;
    temp &= ~FDI_RX_BIT_LOCK;
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    udelay(150);

    /* enable CPU FDI TX and PCH FDI RX */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    /* bits 21:19 hold the lane count minus one */
    temp &= ~(7 << 19);
    temp |= (intel_crtc->fdi_lanes - 1) << 19;
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_1;
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
    /* SNB-B */
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
    I915_WRITE(reg, temp | FDI_TX_ENABLE);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    if (HAS_PCH_CPT(dev)) {
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
        temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
    } else {
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_1;
    }
    I915_WRITE(reg, temp | FDI_RX_ENABLE);

    POSTING_READ(reg);
    udelay(150);

    if (HAS_PCH_CPT(dev))
        cpt_phase_pointer_enable(dev, pipe);

    /* Step through voltage/emphasis settings until bit lock. */
    for (i = 0; i < 4; i++ ) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(500);

        reg = FDI_RX_IIR(pipe);
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if (temp & FDI_RX_BIT_LOCK) {
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
            DRM_DEBUG_KMS("FDI train 1 done.\n");
            break;
        }
    }
    if (i == 4)
        DRM_ERROR("FDI train 1 fail!\n");

    /* Train 2 */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_2;
    if (IS_GEN6(dev)) {
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        /* SNB-B */
        temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
    }
    I915_WRITE(reg, temp);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    if (HAS_PCH_CPT(dev)) {
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
        temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
    } else {
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_2;
    }
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    udelay(150);

    /* Step through voltage/emphasis settings until symbol lock. */
    for (i = 0; i < 4; i++ ) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(500);

        reg = FDI_RX_IIR(pipe);
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if (temp & FDI_RX_SYMBOL_LOCK) {
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
            DRM_DEBUG_KMS("FDI train 2 done.\n");
            break;
        }
    }
    if (i == 4)
        DRM_ERROR("FDI train 2 fail!\n");

    DRM_DEBUG_KMS("FDI train done.\n");
}
2506
 
2507
/* Manual link training for Ivy Bridge A0 parts
 *
 * IVB normally auto-trains; A0 steppings need the manual two-phase
 * sequence.  Auto-train is explicitly masked off and IVB-specific
 * pattern bits are used on the TX side; the RX side uses the CPT
 * pattern encoding.  Failures are logged, not propagated. */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    u32 reg, temp, i;

    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
       for train result */
    reg = FDI_RX_IMR(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_RX_SYMBOL_LOCK;
    temp &= ~FDI_RX_BIT_LOCK;
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    udelay(150);

    /* enable CPU FDI TX and PCH FDI RX */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    /* bits 21:19 hold the lane count minus one */
    temp &= ~(7 << 19);
    temp |= (intel_crtc->fdi_lanes - 1) << 19;
    temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
    temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
    I915_WRITE(reg, temp | FDI_TX_ENABLE);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_AUTO;
    temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
    temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
    I915_WRITE(reg, temp | FDI_RX_ENABLE);

    POSTING_READ(reg);
    udelay(150);

    if (HAS_PCH_CPT(dev))
        cpt_phase_pointer_enable(dev, pipe);

    /* Step through voltage/emphasis settings until bit lock. */
    for (i = 0; i < 4; i++ ) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(500);

        reg = FDI_RX_IIR(pipe);
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        /* Re-read IIR once more in case the lock bit latched late. */
        if (temp & FDI_RX_BIT_LOCK ||
            (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
            DRM_DEBUG_KMS("FDI train 1 done.\n");
            break;
        }
    }
    if (i == 4)
        DRM_ERROR("FDI train 1 fail!\n");

    /* Train 2 */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE_IVB;
    temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
    I915_WRITE(reg, temp);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
    temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    udelay(150);

    /* Step through voltage/emphasis settings until symbol lock. */
    for (i = 0; i < 4; i++ ) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(500);

        reg = FDI_RX_IIR(pipe);
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if (temp & FDI_RX_SYMBOL_LOCK) {
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
            DRM_DEBUG_KMS("FDI train 2 done.\n");
            break;
        }
    }
    if (i == 4)
        DRM_ERROR("FDI train 2 fail!\n");

    DRM_DEBUG_KMS("FDI train done.\n");
}
2618
 
2619
/* Power up the FDI PLLs for @crtc's pipe: enable the PCH FDI RX PLL
 * (programming lane count and BPC from PIPECONF), switch the RX from
 * Rawclk to PCDclk, then make sure the CPU FDI TX PLL is on.  Each
 * step is followed by a posting read and a settle delay. */
static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* Write the TU size bits so error detection works */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* clear lane count (21:19) and BPC (18:16) fields */
	temp &= ~((0x7 << 19) | (0x7 << 16));
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	/* mirror the pipe's BPC setting into the FDI receiver */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
2659
 
2660
/* Disable the FDI phase sync pointer for @pipe on CougarPoint PCH.
 * Mirror image of cpt_phase_pointer_enable(): first clear the enable
 * bit, then clear the override bit to re-lock the field. */
static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flags = I915_READ(SOUTH_CHICKEN1);

	flags &= ~(FDI_PHASE_SYNC_EN(pipe));
	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
	flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
	POSTING_READ(SOUTH_CHICKEN1);
}
2671
static void ironlake_fdi_disable(struct drm_crtc *crtc)
2672
{
2673
	struct drm_device *dev = crtc->dev;
2674
	struct drm_i915_private *dev_priv = dev->dev_private;
2675
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2676
	int pipe = intel_crtc->pipe;
2677
	u32 reg, temp;
2678
 
2679
	/* disable CPU FDI tx and PCH FDI rx */
2680
	reg = FDI_TX_CTL(pipe);
2681
	temp = I915_READ(reg);
2682
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2683
	POSTING_READ(reg);
2684
 
2685
	reg = FDI_RX_CTL(pipe);
2686
	temp = I915_READ(reg);
2687
	temp &= ~(0x7 << 16);
2688
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2689
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2690
 
2691
	POSTING_READ(reg);
2692
	udelay(100);
2693
 
2694
	/* Ironlake workaround, disable clock pointer after downing FDI */
2695
	if (HAS_PCH_IBX(dev)) {
2696
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2697
		I915_WRITE(FDI_RX_CHICKEN(pipe),
2698
			   I915_READ(FDI_RX_CHICKEN(pipe) &
2699
				     ~FDI_RX_PHASE_SYNC_POINTER_EN));
2700
	} else if (HAS_PCH_CPT(dev)) {
2701
		cpt_phase_pointer_disable(dev, pipe);
2702
	}
2703
 
2704
	/* still set train pattern 1 */
2705
	reg = FDI_TX_CTL(pipe);
2706
	temp = I915_READ(reg);
2707
	temp &= ~FDI_LINK_TRAIN_NONE;
2708
	temp |= FDI_LINK_TRAIN_PATTERN_1;
2709
	I915_WRITE(reg, temp);
2710
 
2711
	reg = FDI_RX_CTL(pipe);
2712
	temp = I915_READ(reg);
2713
	if (HAS_PCH_CPT(dev)) {
2714
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2715
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2716
	} else {
2717
		temp &= ~FDI_LINK_TRAIN_NONE;
2718
		temp |= FDI_LINK_TRAIN_PATTERN_1;
2719
	}
2720
	/* BPC in FDI rx is consistent with that in PIPECONF */
2721
	temp &= ~(0x07 << 16);
2722
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2723
	I915_WRITE(reg, temp);
2724
 
2725
	POSTING_READ(reg);
2726
	udelay(100);
2727
}
2728
 
2729
/*
2730
 * When we disable a pipe, we need to clear any pending scanline wait events
2731
 * to avoid hanging the ring, which we assume we are waiting on.
2732
 */
2733
static void intel_clear_scanline_wait(struct drm_device *dev)
2734
{
2735
	struct drm_i915_private *dev_priv = dev->dev_private;
2736
	struct intel_ring_buffer *ring;
2737
	u32 tmp;
2738
 
2739
	if (IS_GEN2(dev))
2740
		/* Can't break the hang on i8xx */
2741
		return;
2742
 
2743
	ring = LP_RING(dev_priv);
2744
	tmp = I915_READ_CTL(ring);
2745
	if (tmp & RING_WAIT)
2746
		I915_WRITE_CTL(ring, tmp);
2747
}
2748
 
2749
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2750
{
2751
	struct drm_i915_gem_object *obj;
2752
	struct drm_i915_private *dev_priv;
2753
 
2754
	if (crtc->fb == NULL)
2755
		return;
2756
 
2757
	obj = to_intel_framebuffer(crtc->fb)->obj;
2758
	dev_priv = crtc->dev->dev_private;
2759
//	wait_event(dev_priv->pending_flip_queue,
2760
//		   atomic_read(&obj->pending_flip) == 0);
2761
}
2762
 
2763
static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2764
{
2765
	struct drm_device *dev = crtc->dev;
2766
	struct drm_mode_config *mode_config = &dev->mode_config;
2767
	struct intel_encoder *encoder;
2768
 
2769
	/*
2770
	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2771
	 * must be driven by its own crtc; no sharing is possible.
2772
	 */
2773
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
2774
		if (encoder->base.crtc != crtc)
2775
			continue;
2776
 
2777
		switch (encoder->type) {
2778
		case INTEL_OUTPUT_EDP:
2779
			if (!intel_encoder_is_pch_edp(&encoder->base))
2780
				return false;
2781
			continue;
2782
		}
2783
	}
2784
 
2785
	return true;
2786
}
2787
 
2788
/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	intel_enable_pch_pll(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		/* Be sure PCH DPLL SEL is set */
		temp = I915_READ(PCH_DPLL_SEL);
		if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0)
			temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
		else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0)
			temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	/* copy the pipe's CPU-side timings into the PCH transcoder */
	I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
	I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
	I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));

	I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
	I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));

	/* training succeeded (or was attempted); go to normal traffic */
	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) &&
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= (TRANS_DP_OUTPUT_ENABLE |
			 TRANS_DP_ENH_FRAMING);
		temp |= bpc << 9; /* same format but at 11:9 */

		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* route the transcoder to the DP port driving this crtc */
		switch (intel_trans_dp_port_sel(crtc)) {
		case PCH_DP_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		}

		I915_WRITE(reg, temp);
	}

	intel_enable_transcoder(dev_priv, pipe);
}
2870
 
2871
/* Bring up everything needed to drive this CRTC on Ironlake-class
 * hardware, in the required order: watermarks, LVDS port, FDI PLL
 * (for PCH outputs), panel fitter, gamma LUT, pipe, plane, and
 * finally the PCH side if used.  No-op if already active. */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;
    u32 temp;
    bool is_pch_port;

    if (intel_crtc->active)
        return;

    intel_crtc->active = true;
    intel_update_watermarks(dev);

    /* Make sure the LVDS port is powered before touching the pipe. */
    if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
        temp = I915_READ(PCH_LVDS);
        if ((temp & LVDS_PORT_EN) == 0)
            I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
    }

    is_pch_port = intel_crtc_driving_pch(crtc);

    /* FDI is only needed when the pipe feeds the PCH. */
    if (is_pch_port)
        ironlake_fdi_pll_enable(crtc);
    else
        ironlake_fdi_disable(crtc);

    /* Enable panel fitting for LVDS */
    if (dev_priv->pch_pf_size &&
        (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
        /* Force use of hard-coded filter coefficients
         * as some pre-programmed values are broken,
         * e.g. x201.
         */
        I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
        I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
        I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
    }

    /*
     * On ILK+ LUT must be loaded before the pipe is running but with
     * clocks enabled
     */
    intel_crtc_load_lut(crtc);

    intel_enable_pipe(dev_priv, pipe, is_pch_port);
    intel_enable_plane(dev_priv, plane, pipe);

    if (is_pch_port)
        ironlake_pch_enable(crtc);

    /* FBC state is re-evaluated under struct_mutex. */
    mutex_lock(&dev->struct_mutex);
    intel_update_fbc(dev);
    mutex_unlock(&dev->struct_mutex);

//    intel_crtc_update_cursor(crtc, true);
}
2930
 
2931
/* Power down a CRTC on Ironlake-class hardware.
 *
 * Roughly the reverse of ironlake_crtc_enable(): plane and pipe first,
 * then panel fitter, FDI, PCH ports/transcoder, and finally the PLLs
 * and FDI clocks.  The ordering follows the hardware teardown sequence
 * and must not be changed.
 */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;
    u32 reg, temp;

    /* Idempotent: nothing to do when the crtc is already off. */
    if (!intel_crtc->active)
        return;

    /* Let queued page flips drain before tearing the pipe down. */
    intel_crtc_wait_for_pending_flips(crtc);
//    drm_vblank_off(dev, pipe);
//    intel_crtc_update_cursor(crtc, false);

    intel_disable_plane(dev_priv, plane, pipe);

    /* FBC must be stopped if it was compressing this plane. */
    if (dev_priv->cfb_plane == plane)
        intel_disable_fbc(dev);

    intel_disable_pipe(dev_priv, pipe);

    /* Disable PF */
    I915_WRITE(PF_CTL(pipe), 0);
    I915_WRITE(PF_WIN_SZ(pipe), 0);

    ironlake_fdi_disable(crtc);

    /* This is a horrible layering violation; we should be doing this in
     * the connector/encoder ->prepare instead, but we don't always have
     * enough information there about the config to know whether it will
     * actually be necessary or just cause undesired flicker.
     */
    intel_disable_pch_ports(dev_priv, pipe);

    intel_disable_transcoder(dev_priv, pipe);

    /* CougarPoint PCH routes DP through the transcoder and selects
     * DPLLs per transcoder; both must be unhooked. */
    if (HAS_PCH_CPT(dev)) {
        /* disable TRANS_DP_CTL */
        reg = TRANS_DP_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
        temp |= TRANS_DP_PORT_SEL_NONE;
        I915_WRITE(reg, temp);

        /* disable DPLL_SEL */
        temp = I915_READ(PCH_DPLL_SEL);
        switch (pipe) {
        case 0:
            temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
            break;
        case 1:
            temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
            break;
        case 2:
            /* FIXME: manage transcoder PLLs? */
            temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
            break;
        default:
            BUG(); /* wtf */
        }
        I915_WRITE(PCH_DPLL_SEL, temp);
    }

    /* disable PCH DPLL */
    intel_disable_pch_pll(dev_priv, pipe);

    /* Switch from PCDclk to Rawclk */
    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    I915_WRITE(reg, temp & ~FDI_PCDCLK);

    /* Disable CPU FDI TX PLL */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

    /* Posting read flushes the write before the fixed settle delay. */
    POSTING_READ(reg);
    udelay(100);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

    /* Wait for the clocks to turn off. */
    POSTING_READ(reg);
    udelay(100);

    intel_crtc->active = false;
    intel_update_watermarks(dev);

    /* FBC and scanline-wait state are protected by struct_mutex. */
    mutex_lock(&dev->struct_mutex);
    intel_update_fbc(dev);
    intel_clear_scanline_wait(dev);
    mutex_unlock(&dev->struct_mutex);
}
3028
 
3029
static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
3030
{
3031
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3032
    int pipe = intel_crtc->pipe;
3033
    int plane = intel_crtc->plane;
3034
 
3035
    /* XXX: When our outputs are all unaware of DPMS modes other than off
3036
     * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3037
     */
3038
    switch (mode) {
3039
    case DRM_MODE_DPMS_ON:
3040
    case DRM_MODE_DPMS_STANDBY:
3041
    case DRM_MODE_DPMS_SUSPEND:
3042
        DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
3043
        ironlake_crtc_enable(crtc);
3044
        break;
3045
 
3046
    case DRM_MODE_DPMS_OFF:
3047
        DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
3048
        ironlake_crtc_disable(crtc);
3049
        break;
3050
    }
3051
}
3052
 
3053
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3054
{
3055
	if (!enable && intel_crtc->overlay) {
3056
		struct drm_device *dev = intel_crtc->base.dev;
3057
		struct drm_i915_private *dev_priv = dev->dev_private;
3058
 
3059
		mutex_lock(&dev->struct_mutex);
3060
		dev_priv->mm.interruptible = false;
3061
//       (void) intel_overlay_switch_off(intel_crtc->overlay);
3062
		dev_priv->mm.interruptible = true;
3063
		mutex_unlock(&dev->struct_mutex);
3064
	}
3065
 
3066
	/* Let userspace switch the overlay on again. In most cases userspace
3067
	 * has to recompute where to put it anyway.
3068
	 */
3069
}
3070
 
3071
/* Power up a pre-Ironlake (i9xx) CRTC: PLL, pipe, plane, then LUT,
 * FBC and the overlay scaler.  Ordering follows the hardware bring-up
 * sequence. */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;

    /* Idempotent: already running. */
    if (intel_crtc->active)
        return;

    intel_crtc->active = true;
    intel_update_watermarks(dev);

    intel_enable_pll(dev_priv, pipe);
    /* No PCH on these chipsets, hence pch_port = false. */
    intel_enable_pipe(dev_priv, pipe, false);
    intel_enable_plane(dev_priv, plane, pipe);

    intel_crtc_load_lut(crtc);
    intel_update_fbc(dev);

    /* Give the overlay scaler a chance to enable if it's on this pipe */
    intel_crtc_dpms_overlay(intel_crtc, true);
//    intel_crtc_update_cursor(crtc, true);    /* cursor update disabled in this port */
}
3096
 
3097
/* Power down a pre-Ironlake (i9xx) CRTC: overlay/FBC first, then
 * plane, pipe and PLL — the reverse of i9xx_crtc_enable(). */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;

    /* Idempotent: already off. */
    if (!intel_crtc->active)
        return;

    /* Give the overlay scaler a chance to disable if it's on this pipe */
    intel_crtc_wait_for_pending_flips(crtc);
//    drm_vblank_off(dev, pipe);
    intel_crtc_dpms_overlay(intel_crtc, false);
//    intel_crtc_update_cursor(crtc, false);

    /* FBC must be stopped if it was compressing this plane. */
    if (dev_priv->cfb_plane == plane)
        intel_disable_fbc(dev);

    intel_disable_plane(dev_priv, plane, pipe);
    intel_disable_pipe(dev_priv, pipe);
    intel_disable_pll(dev_priv, pipe);

    intel_crtc->active = false;
    intel_update_fbc(dev);
    intel_update_watermarks(dev);
    intel_clear_scanline_wait(dev);
}
3126
 
3127
static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
3128
{
3129
    /* XXX: When our outputs are all unaware of DPMS modes other than off
3130
     * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3131
     */
3132
    switch (mode) {
3133
    case DRM_MODE_DPMS_ON:
3134
    case DRM_MODE_DPMS_STANDBY:
3135
    case DRM_MODE_DPMS_SUSPEND:
3136
        i9xx_crtc_enable(crtc);
3137
        break;
3138
    case DRM_MODE_DPMS_OFF:
3139
        i9xx_crtc_disable(crtc);
3140
        break;
3141
    }
3142
}
3143
 
3144
 
3145
 
3146
 
3147
 
3148
 
3149
 
3150
 
3151
 
3152
 
3153
 
3154
 
3155
 
3156
 
3157
 
3158
 
3159
 
3160
 
3161
/* Core display clock in kHz for i945-class chipsets: fixed 400 MHz. */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}
3165
 
3166
/* Core display clock in kHz for i915-class chipsets: fixed 333 MHz. */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333000;
}
3170
 
3171
/* Core display clock in kHz for the remaining i9xx variants: 200 MHz. */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}
3175
 
3176
static int i915gm_get_display_clock_speed(struct drm_device *dev)
3177
{
3178
	u16 gcfgc = 0;
3179
 
3180
	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3181
 
3182
	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
3183
		return 133000;
3184
	else {
3185
		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
3186
		case GC_DISPLAY_CLOCK_333_MHZ:
3187
			return 333000;
3188
		default:
3189
		case GC_DISPLAY_CLOCK_190_200_MHZ:
3190
			return 190000;
3191
		}
3192
	}
3193
}
3194
 
3195
/* Core display clock in kHz for i865: fixed 266 MHz. */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266000;
}
3199
 
3200
/* Core display clock in kHz for i855.  The HPLLCC value is not read
 * from the chipset here; hpllcc stays 0, which deliberately selects
 * the default high-speed decode below. */
static int i855_get_display_clock_speed(struct drm_device *dev)
{
	u16 hpllcc = 0;
	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133000;
	}

	/* Shouldn't happen */
	return 0;
}
3219
 
3220
/* Core display clock in kHz for i830: fixed 133 MHz. */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133000;
}
3224
 
3225
/* FDI link M/N ratio values computed by ironlake_compute_m_n() and
 * programmed into the data (gmch) and link M/N registers. */
struct fdi_m_n {
    u32        tu;        /* transfer unit size; set to 64 by ironlake_compute_m_n() */
    u32        gmch_m;    /* data M: bits_per_pixel * pixel_clock (reduced) */
    u32        gmch_n;    /* data N: link_clock * nlanes * 8 (reduced) */
    u32        link_m;    /* link M: pixel_clock (reduced) */
    u32        link_n;    /* link N: link_clock (reduced) */
};
3232
 
3233
static void
3234
fdi_reduce_ratio(u32 *num, u32 *den)
3235
{
3236
	while (*num > 0xffffff || *den > 0xffffff) {
3237
		*num >>= 1;
3238
		*den >>= 1;
3239
	}
3240
}
3241
 
3242
static void
3243
ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
3244
		     int link_clock, struct fdi_m_n *m_n)
3245
{
3246
	m_n->tu = 64; /* default size */
3247
 
3248
	/* BUG_ON(pixel_clock > INT_MAX / 36); */
3249
	m_n->gmch_m = bits_per_pixel * pixel_clock;
3250
	m_n->gmch_n = link_clock * nlanes * 8;
3251
	fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
3252
 
3253
	m_n->link_m = pixel_clock;
3254
	m_n->link_n = link_clock;
3255
	fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
3256
}
3257
 
3258
 
3259
/* Per-platform FIFO parameters consumed by intel_calculate_wm() and
 * the g4x watermark helpers. */
struct intel_watermark_params {
    unsigned long fifo_size;        /* total FIFO size, in cachelines */
    unsigned long max_wm;           /* highest level accepted by the register */
    unsigned long default_wm;       /* fallback level when the FIFO is too small */
    unsigned long guard_size;       /* extra cachelines reserved above the fetch */
    unsigned long cacheline_size;   /* bytes per FIFO cacheline */
};
3266
 
3267
/* Pineview has different values for various configs */
3268
static const struct intel_watermark_params pineview_display_wm = {
3269
    PINEVIEW_DISPLAY_FIFO,
3270
    PINEVIEW_MAX_WM,
3271
    PINEVIEW_DFT_WM,
3272
    PINEVIEW_GUARD_WM,
3273
    PINEVIEW_FIFO_LINE_SIZE
3274
};
3275
static const struct intel_watermark_params pineview_display_hplloff_wm = {
3276
    PINEVIEW_DISPLAY_FIFO,
3277
    PINEVIEW_MAX_WM,
3278
    PINEVIEW_DFT_HPLLOFF_WM,
3279
    PINEVIEW_GUARD_WM,
3280
    PINEVIEW_FIFO_LINE_SIZE
3281
};
3282
static const struct intel_watermark_params pineview_cursor_wm = {
3283
    PINEVIEW_CURSOR_FIFO,
3284
    PINEVIEW_CURSOR_MAX_WM,
3285
    PINEVIEW_CURSOR_DFT_WM,
3286
    PINEVIEW_CURSOR_GUARD_WM,
3287
    PINEVIEW_FIFO_LINE_SIZE,
3288
};
3289
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
3290
    PINEVIEW_CURSOR_FIFO,
3291
    PINEVIEW_CURSOR_MAX_WM,
3292
    PINEVIEW_CURSOR_DFT_WM,
3293
    PINEVIEW_CURSOR_GUARD_WM,
3294
    PINEVIEW_FIFO_LINE_SIZE
3295
};
3296
static const struct intel_watermark_params g4x_wm_info = {
3297
    G4X_FIFO_SIZE,
3298
    G4X_MAX_WM,
3299
    G4X_MAX_WM,
3300
    2,
3301
    G4X_FIFO_LINE_SIZE,
3302
};
3303
static const struct intel_watermark_params g4x_cursor_wm_info = {
3304
    I965_CURSOR_FIFO,
3305
    I965_CURSOR_MAX_WM,
3306
    I965_CURSOR_DFT_WM,
3307
    2,
3308
    G4X_FIFO_LINE_SIZE,
3309
};
3310
static const struct intel_watermark_params i965_cursor_wm_info = {
3311
    I965_CURSOR_FIFO,
3312
    I965_CURSOR_MAX_WM,
3313
    I965_CURSOR_DFT_WM,
3314
    2,
3315
    I915_FIFO_LINE_SIZE,
3316
};
3317
static const struct intel_watermark_params i945_wm_info = {
3318
    I945_FIFO_SIZE,
3319
    I915_MAX_WM,
3320
    1,
3321
    2,
3322
    I915_FIFO_LINE_SIZE
3323
};
3324
static const struct intel_watermark_params i915_wm_info = {
3325
    I915_FIFO_SIZE,
3326
    I915_MAX_WM,
3327
    1,
3328
    2,
3329
    I915_FIFO_LINE_SIZE
3330
};
3331
static const struct intel_watermark_params i855_wm_info = {
3332
    I855GM_FIFO_SIZE,
3333
    I915_MAX_WM,
3334
    1,
3335
    2,
3336
    I830_FIFO_LINE_SIZE
3337
};
3338
static const struct intel_watermark_params i830_wm_info = {
3339
    I830_FIFO_SIZE,
3340
    I915_MAX_WM,
3341
    1,
3342
    2,
3343
    I830_FIFO_LINE_SIZE
3344
};
3345
 
3346
static const struct intel_watermark_params ironlake_display_wm_info = {
3347
    ILK_DISPLAY_FIFO,
3348
    ILK_DISPLAY_MAXWM,
3349
    ILK_DISPLAY_DFTWM,
3350
    2,
3351
    ILK_FIFO_LINE_SIZE
3352
};
3353
static const struct intel_watermark_params ironlake_cursor_wm_info = {
3354
    ILK_CURSOR_FIFO,
3355
    ILK_CURSOR_MAXWM,
3356
    ILK_CURSOR_DFTWM,
3357
    2,
3358
    ILK_FIFO_LINE_SIZE
3359
};
3360
static const struct intel_watermark_params ironlake_display_srwm_info = {
3361
    ILK_DISPLAY_SR_FIFO,
3362
    ILK_DISPLAY_MAX_SRWM,
3363
    ILK_DISPLAY_DFT_SRWM,
3364
    2,
3365
    ILK_FIFO_LINE_SIZE
3366
};
3367
static const struct intel_watermark_params ironlake_cursor_srwm_info = {
3368
    ILK_CURSOR_SR_FIFO,
3369
    ILK_CURSOR_MAX_SRWM,
3370
    ILK_CURSOR_DFT_SRWM,
3371
    2,
3372
    ILK_FIFO_LINE_SIZE
3373
};
3374
 
3375
static const struct intel_watermark_params sandybridge_display_wm_info = {
3376
    SNB_DISPLAY_FIFO,
3377
    SNB_DISPLAY_MAXWM,
3378
    SNB_DISPLAY_DFTWM,
3379
    2,
3380
    SNB_FIFO_LINE_SIZE
3381
};
3382
static const struct intel_watermark_params sandybridge_cursor_wm_info = {
3383
    SNB_CURSOR_FIFO,
3384
    SNB_CURSOR_MAXWM,
3385
    SNB_CURSOR_DFTWM,
3386
    2,
3387
    SNB_FIFO_LINE_SIZE
3388
};
3389
static const struct intel_watermark_params sandybridge_display_srwm_info = {
3390
    SNB_DISPLAY_SR_FIFO,
3391
    SNB_DISPLAY_MAX_SRWM,
3392
    SNB_DISPLAY_DFT_SRWM,
3393
    2,
3394
    SNB_FIFO_LINE_SIZE
3395
};
3396
static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
3397
    SNB_CURSOR_SR_FIFO,
3398
    SNB_CURSOR_MAX_SRWM,
3399
    SNB_CURSOR_DFT_SRWM,
3400
    2,
3401
    SNB_FIFO_LINE_SIZE
3402
};
3403
 
3404
 
3405
/**
3406
 * intel_calculate_wm - calculate watermark level
3407
 * @clock_in_khz: pixel clock
3408
 * @wm: chip FIFO params
3409
 * @pixel_size: display pixel size
3410
 * @latency_ns: memory latency for the platform
3411
 *
3412
 * Calculate the watermark level (the level at which the display plane will
3413
 * start fetching from memory again).  Each chip has a different display
3414
 * FIFO size and allocation, so the caller needs to figure that out and pass
3415
 * in the correct intel_watermark_params structure.
3416
 *
3417
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
3418
 * on the pixel size.  When it reaches the watermark level, it'll start
3419
 * fetching FIFO line sized based chunks from memory until the FIFO fills
3420
 * past the watermark point.  If the FIFO drains completely, a FIFO underrun
3421
 * will occur, and a display engine hang could result.
3422
 */
3423
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
3424
                    const struct intel_watermark_params *wm,
3425
                    int fifo_size,
3426
                    int pixel_size,
3427
                    unsigned long latency_ns)
3428
{
3429
    long entries_required, wm_size;
3430
 
3431
    /*
3432
     * Note: we need to make sure we don't overflow for various clock &
3433
     * latency values.
3434
     * clocks go from a few thousand to several hundred thousand.
3435
     * latency is usually a few thousand
3436
     */
3437
    entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
3438
        1000;
3439
    entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
3440
 
3441
    DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
3442
 
3443
    wm_size = fifo_size - (entries_required + wm->guard_size);
3444
 
3445
    DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
3446
 
3447
    /* Don't promote wm_size to unsigned... */
3448
    if (wm_size > (long)wm->max_wm)
3449
        wm_size = wm->max_wm;
3450
    if (wm_size <= 0)
3451
        wm_size = wm->default_wm;
3452
    return wm_size;
3453
}
3454
 
3455
/* One row of the Pineview CxSR latency table: a memory-configuration
 * key (desktop/mobile, DDR type, FSB and memory frequency — matched
 * exactly against dev_priv->fsb_freq/mem_freq) plus the self-refresh
 * latencies, in ns, fed into the watermark computation. */
struct cxsr_latency {
    int is_desktop;                      /* 1 = desktop (Pineview G), 0 = mobile */
    int is_ddr3;                         /* 1 = DDR3, 0 = DDR2 */
    unsigned long fsb_freq;              /* FSB frequency key */
    unsigned long mem_freq;              /* memory frequency key */
    unsigned long display_sr;            /* display self-refresh latency, ns */
    unsigned long display_hpll_disable;  /* display latency with HPLL off, ns */
    unsigned long cursor_sr;             /* cursor self-refresh latency, ns */
    unsigned long cursor_hpll_disable;   /* cursor latency with HPLL off, ns */
};
3465
 
3466
/* Pineview CxSR latency table, looked up by intel_get_cxsr_latency().
 * Rows are grouped desktop-first, by descending FSB frequency; the
 * trailing comment on each row names the memory configuration. */
static const struct cxsr_latency cxsr_latency_table[] = {
    {1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
    {1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
    {1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
    {1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
    {1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

    {1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
    {1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
    {1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
    {1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
    {1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

    {1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
    {1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
    {1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
    {1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
    {1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

    {0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
    {0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
    {0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
    {0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
    {0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

    {0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
    {0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
    {0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
    {0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
    {0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

    {0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
    {0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
    {0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
    {0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
    {0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
3503
 
3504
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
3505
                             int is_ddr3,
3506
                             int fsb,
3507
                             int mem)
3508
{
3509
    const struct cxsr_latency *latency;
3510
    int i;
3511
 
3512
    if (fsb == 0 || mem == 0)
3513
        return NULL;
3514
 
3515
    for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
3516
        latency = &cxsr_latency_table[i];
3517
        if (is_desktop == latency->is_desktop &&
3518
            is_ddr3 == latency->is_ddr3 &&
3519
            fsb == latency->fsb_freq && mem == latency->mem_freq)
3520
            return latency;
3521
    }
3522
 
3523
    DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
3524
 
3525
    return NULL;
3526
}
3527
 
3528
/* Turn off Pineview self-refresh (CxSR) by clearing its enable bit in
 * DSPFW3; the remaining watermark fields are left untouched. */
static void pineview_disable_cxsr(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    /* deactivate cxsr */
    I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}
3535
 
3536
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000;    /* 5 us, expressed in nanoseconds */
3551
 
3552
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
3553
{
3554
	struct drm_i915_private *dev_priv = dev->dev_private;
3555
	uint32_t dsparb = I915_READ(DSPARB);
3556
	int size;
3557
 
3558
	size = dsparb & 0x7f;
3559
	if (plane)
3560
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
3561
 
3562
	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3563
		      plane ? "B" : "A", size);
3564
 
3565
	return size;
3566
}
3567
 
3568
static int i85x_get_fifo_size(struct drm_device *dev, int plane)
3569
{
3570
	struct drm_i915_private *dev_priv = dev->dev_private;
3571
	uint32_t dsparb = I915_READ(DSPARB);
3572
	int size;
3573
 
3574
	size = dsparb & 0x1ff;
3575
	if (plane)
3576
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
3577
	size >>= 1; /* Convert to cachelines */
3578
 
3579
	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3580
		      plane ? "B" : "A", size);
3581
 
3582
	return size;
3583
}
3584
 
3585
static int i845_get_fifo_size(struct drm_device *dev, int plane)
3586
{
3587
	struct drm_i915_private *dev_priv = dev->dev_private;
3588
	uint32_t dsparb = I915_READ(DSPARB);
3589
	int size;
3590
 
3591
	size = dsparb & 0x7f;
3592
	size >>= 2; /* Convert to cachelines */
3593
 
3594
	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3595
		      plane ? "B" : "A",
3596
		      size);
3597
 
3598
	return size;
3599
}
3600
 
3601
static int i830_get_fifo_size(struct drm_device *dev, int plane)
3602
{
3603
	struct drm_i915_private *dev_priv = dev->dev_private;
3604
	uint32_t dsparb = I915_READ(DSPARB);
3605
	int size;
3606
 
3607
	size = dsparb & 0x7f;
3608
	size >>= 1; /* Convert to cachelines */
3609
 
3610
	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3611
		      plane ? "B" : "A", size);
3612
 
3613
	return size;
3614
}
3615
 
3616
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
3617
{
3618
    struct drm_crtc *crtc, *enabled = NULL;
3619
 
3620
    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3621
        if (crtc->enabled && crtc->fb) {
3622
            if (enabled)
3623
                return NULL;
3624
            enabled = crtc;
3625
        }
3626
    }
3627
 
3628
    return enabled;
3629
}
3630
 
3631
/* Program the Pineview self-refresh (CxSR) watermarks into DSPFW1/3.
 * CxSR needs a known memory latency and a single active crtc; in every
 * other case self-refresh is disabled instead. */
static void pineview_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		int clock = crtc->mode.clock;
		int pixel_size = crtc->fb->bits_per_pixel / 8;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR (note: the display params' fifo_size is passed here) */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}
3699
 
3700
/* Compute the normal (non-self-refresh) watermarks for one pipe's
 * primary plane and cursor on g4x.  When the pipe has no fb or is
 * disabled, the guard sizes are stored and false is returned; true
 * means real watermarks were written to *plane_wm / *cursor_wm. */
static bool g4x_compute_wm0(struct drm_device *dev,
                int plane,
                const struct intel_watermark_params *display,
                int display_latency_ns,
                const struct intel_watermark_params *cursor,
                int cursor_latency_ns,
                int *plane_wm,
                int *cursor_wm)
{
    struct drm_crtc *crtc;
    int htotal, hdisplay, clock, pixel_size;
    int line_time_us, line_count;
    int entries, tlb_miss;

    crtc = intel_get_crtc_for_plane(dev, plane);
    if (crtc->fb == NULL || !crtc->enabled) {
        *cursor_wm = cursor->guard_size;
        *plane_wm = display->guard_size;
        return false;
    }

    htotal = crtc->mode.htotal;
    hdisplay = crtc->mode.hdisplay;
    clock = crtc->mode.clock;    /* NOTE(review): assumed nonzero for an enabled crtc */
    pixel_size = crtc->fb->bits_per_pixel / 8;

    /* Use the small buffer method to calculate plane watermark */
    entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
    /* Presumably extra fetch cost when the FIFO span exceeds the
     * visible line width -- TODO confirm against hardware docs. */
    tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
    if (tlb_miss > 0)
        entries += tlb_miss;
    entries = DIV_ROUND_UP(entries, display->cacheline_size);
    *plane_wm = entries + display->guard_size;
    if (*plane_wm > (int)display->max_wm)
        *plane_wm = display->max_wm;

    /* Use the large buffer method to calculate cursor watermark */
    line_time_us = ((htotal * 1000) / clock);
    line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
    entries = line_count * 64 * pixel_size;    /* 64-pixel-wide cursor assumed -- TODO confirm */
    tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
    if (tlb_miss > 0)
        entries += tlb_miss;
    entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
    *cursor_wm = entries + cursor->guard_size;
    if (*cursor_wm > (int)cursor->max_wm)
        *cursor_wm = (int)cursor->max_wm;

    return true;
}
3750
 
3751
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.  Returns true when both values are usable.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	/* Both watermarks zero means the latency computation produced
	 * nothing useful, so self-refresh is reported as unusable. */
	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}
3785
 
3786
/* Compute self-refresh watermarks for g4x: the FIFO levels the display
 * plane and cursor need to ride out latency_ns without fetching.
 * Returns true (via g4x_check_srwm) when the values fit the hardware
 * limits; with a zero latency both outputs are zeroed and false is
 * returned. */
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	/* NOTE(review): assumes the crtc for this plane is enabled with a
	 * framebuffer and a nonzero mode clock; the caller guards this via
	 * single_plane_enabled(). */
	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * 64;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
3831
 
3832
/* True iff exactly one bit is set in the enabled-planes mask. */
#define single_plane_enabled(mask) is_power_of_2(mask)
3833
 
3834
/* Program all g4x watermark registers: per-pipe plane and cursor
 * levels into DSPFW1/2, and the self-refresh levels into DSPFW1/3
 * when exactly one pipe is active. */
static void g4x_update_wm(struct drm_device *dev)
{
	static const int sr_latency_ns = 12000;    /* self-refresh has much higher latency */
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;    /* bitmask of pipes with valid wm0 */

	if (g4x_compute_wm0(dev, 0,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	/* Self-refresh is only attempted with a single active pipe. */
	plane_sr = cursor_sr = 0;
	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	else
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
3884
 
3885
/*
 * i965_update_wm - compute and program watermarks for 965-class hardware.
 *
 * Non-SR watermarks are hard-wired to 8 on this generation; only the
 * self-refresh display/cursor watermarks are computed, and only when a
 * single CRTC is enabled.
 */
static void i965_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;		/* minimal safe default */
	int cursor_sr = 16;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		int clock = crtc->mode.clock;
		int htotal = crtc->mode.htotal;
		int hdisplay = crtc->mode.hdisplay;
		int pixel_size = crtc->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		/* NOTE(review): divides by mode.clock — assumes an enabled
		 * CRTC always has a non-zero dotclock; confirm upstream. */
		line_time_us = ((htotal * 1000) / clock);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		/* SR watermark field is 9 bits wide */
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		/* Cursor surface is always 64 pixels wide */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * 64;
		entries = DIV_ROUND_UP(entries,
					  i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... non-SR watermarks are fixed at 8 */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
3949
 
3950
/*
 * i9xx_update_wm - compute and program FIFO watermarks for i8xx/i9xx parts.
 *
 * Selects the per-chip watermark parameter table, computes plane A/B
 * watermarks, and (when exactly one plane is enabled and the chip has
 * FW_BLC) the self-refresh watermark. Self-refresh is disabled before
 * the watermark registers are touched and only re-enabled at the end.
 */
static void i9xx_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	/* 'enabled' ends up non-NULL only when exactly one CRTC is active */
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i855_wm_info;

	/* NOTE(review): assumes intel_get_crtc_for_plane() never returns
	 * NULL for planes 0/1 on these chips — confirm against callers. */
	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (crtc->enabled && crtc->fb) {
		planea_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		enabled = crtc;
	} else
		/* Disabled plane: park the watermark at the FIFO top */
		planea_wm = fifo_size - wm_info->guard_size;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (crtc->enabled && crtc->fb) {
		planeb_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			/* Both planes active -> no single-plane config,
			 * so self-refresh must stay off. */
			enabled = NULL;
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	if (IS_I945G(dev) || IS_I945GM(dev))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
	else if (IS_I915GM(dev))
		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		int clock = enabled->mode.clock;
		int htotal = enabled->mode.htotal;
		int hdisplay = enabled->mode.hdisplay;
		int pixel_size = enabled->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = (htotal * 1000) / clock;

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	/* Pack plane A/B (6 bits each) and cursor (5 bits) fields */
	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	/* Re-enable self-refresh only for single-plane configurations */
	if (HAS_FW_BLC(dev)) {
		if (enabled) {
			if (IS_I945G(dev) || IS_I945GM(dev))
				I915_WRITE(FW_BLC_SELF,
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
			else if (IS_I915GM(dev))
				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
			DRM_DEBUG_KMS("memory self refresh enabled\n");
		} else
			DRM_DEBUG_KMS("memory self refresh disabled\n");
	}
}
4060
 
4061
static void i830_update_wm(struct drm_device *dev)
4062
{
4063
	struct drm_i915_private *dev_priv = dev->dev_private;
4064
	struct drm_crtc *crtc;
4065
	uint32_t fwater_lo;
4066
	int planea_wm;
4067
 
4068
	crtc = single_enabled_crtc(dev);
4069
	if (crtc == NULL)
4070
		return;
4071
 
4072
	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
4073
				       dev_priv->display.get_fifo_size(dev, 0),
4074
				       crtc->fb->bits_per_pixel / 8,
4075
				       latency_ns);
4076
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
4077
	fwater_lo |= (3<<8) | planea_wm;
4078
 
4079
	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
4080
 
4081
	I915_WRITE(FW_BLC, fwater_lo);
4082
}
4083
 
4084
#define ILK_LP0_PLANE_LATENCY		700
4085
#define ILK_LP0_CURSOR_LATENCY		1300
4086
 
4087
/*
4088
 * Check the wm result.
4089
 *
4090
 * If any calculated watermark values is larger than the maximum value that
4091
 * can be programmed into the associated watermark register, that watermark
4092
 * must be disabled.
4093
 */
4094
static bool ironlake_check_srwm(struct drm_device *dev, int level,
4095
				int fbc_wm, int display_wm, int cursor_wm,
4096
				const struct intel_watermark_params *display,
4097
				const struct intel_watermark_params *cursor)
4098
{
4099
	struct drm_i915_private *dev_priv = dev->dev_private;
4100
 
4101
	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
4102
		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
4103
 
4104
	if (fbc_wm > SNB_FBC_MAX_SRWM) {
4105
		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
4106
			      fbc_wm, SNB_FBC_MAX_SRWM, level);
4107
 
4108
		/* fbc has it's own way to disable FBC WM */
4109
		I915_WRITE(DISP_ARB_CTL,
4110
			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
4111
		return false;
4112
	}
4113
 
4114
	if (display_wm > display->max_wm) {
4115
		DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
4116
			      display_wm, SNB_DISPLAY_MAX_SRWM, level);
4117
		return false;
4118
	}
4119
 
4120
	if (cursor_wm > cursor->max_wm) {
4121
		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
4122
			      cursor_wm, SNB_CURSOR_MAX_SRWM, level);
4123
		return false;
4124
	}
4125
 
4126
	if (!(fbc_wm || display_wm || cursor_wm)) {
4127
		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
4128
		return false;
4129
	}
4130
 
4131
	return true;
4132
}
4133
 
4134
/*
 * Compute watermark values of WM[1-3],
 *
 * Fills *fbc_wm, *display_wm and *cursor_wm for the given WM level and
 * plane, and validates them via ironlake_check_srwm(). A zero latency
 * zeroes all outputs and returns false (level disabled).
 */
static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
                  int latency_ns,
                  const struct intel_watermark_params *display,
                  const struct intel_watermark_params *cursor,
                  int *fbc_wm, int *display_wm, int *cursor_wm)
{
    struct drm_crtc *crtc;
    unsigned long line_time_us;
    int hdisplay, htotal, pixel_size, clock;
    int line_count, line_size;
    int small, large;
    int entries;

    if (!latency_ns) {
        *fbc_wm = *display_wm = *cursor_wm = 0;
        return false;
    }

    /* NOTE(review): assumes the chosen plane's CRTC exists and has a
     * non-zero mode.clock (divided below) — confirm with callers. */
    crtc = intel_get_crtc_for_plane(dev, plane);
    hdisplay = crtc->mode.hdisplay;
    htotal = crtc->mode.htotal;
    clock = crtc->mode.clock;
    pixel_size = crtc->fb->bits_per_pixel / 8;

    line_time_us = (htotal * 1000) / clock;
    /* Number of scanlines covered by the latency, rounded up */
    line_count = (latency_ns / line_time_us + 1000) / 1000;
    line_size = hdisplay * pixel_size;

    /* Use the minimum of the small and large buffer method for primary */
    small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
    large = line_count * line_size;

    entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
    *display_wm = entries + display->guard_size;

    /*
     * Spec says:
     * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
     */
    *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;

    /* calculate the self-refresh watermark for display cursor
     * (cursor surface is 64 pixels wide) */
    entries = line_count * pixel_size * 64;
    entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
    *cursor_wm = entries + cursor->guard_size;

    /* Validate against the register field limits */
    return ironlake_check_srwm(dev, level,
                   *fbc_wm, *display_wm, *cursor_wm,
                   display, cursor);
}
4187
 
4188
/*
 * ironlake_update_wm - program WM0 for both pipes and (single-plane only)
 * the WM1/WM2 self-refresh levels on Ironlake.
 *
 * All LP watermarks are cleared first; levels are then enabled in
 * ascending order, and computation stops at the first level that fails
 * validation (higher levels must then stay disabled).
 */
static void ironlake_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;	/* bitmask of pipes with valid WM0 */

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEA_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEB_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled))
		return;
	/* Convert the mask to the pipe index of the single enabled pipe */
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   ILK_READ_WM1_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   ILK_READ_WM2_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/*
	 * WM3 is unsupported on ILK, probably because we don't have latency
	 * data for that power state
	 */
}
4270
 
4271
/*
 * sandybridge_update_wm - program WM0 for both pipes and the WM1-WM3
 * self-refresh levels on Sandy Bridge.
 *
 * Mirrors ironlake_update_wm() but with SNB latency registers/params and
 * a third LP level.
 */
static void sandybridge_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;	/* bitmask of pipes with valid WM0 */

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEA_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEB_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 *
	 * SNB support 3 levels of watermark.
	 *
	 * WM1/WM2/WM3 watermarks have to be enabled in the ascending order,
	 * and disabled in the descending order
	 *
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled))
		return;
	/* Convert the mask to the pipe index of the single enabled pipe */
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   SNB_READ_WM1_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   SNB_READ_WM2_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM3 */
	if (!ironlake_compute_srwm(dev, 3, enabled,
				   SNB_READ_WM3_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM3_LP_ILK,
		   WM3_LP_EN |
		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);
}
4366
 
4367
/**
4368
 * intel_update_watermarks - update FIFO watermark values based on current modes
4369
 *
4370
 * Calculate watermark values for the various WM regs based on current mode
4371
 * and plane configuration.
4372
 *
4373
 * There are several cases to deal with here:
4374
 *   - normal (i.e. non-self-refresh)
4375
 *   - self-refresh (SR) mode
4376
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
4377
 *   - lines are small relative to FIFO size (buffer can hold more than 2
4378
 *     lines), so need to account for TLB latency
4379
 *
4380
 *   The normal calculation is:
4381
 *     watermark = dotclock * bytes per pixel * latency
4382
 *   where latency is platform & configuration dependent (we assume pessimal
4383
 *   values here).
4384
 *
4385
 *   The SR calculation is:
4386
 *     watermark = (trunc(latency/line time)+1) * surface width *
4387
 *       bytes per pixel
4388
 *   where
4389
 *     line time = htotal / dotclock
4390
 *     surface width = hdisplay for normal plane and 64 for cursor
4391
 *   and latency is assumed to be high, as above.
4392
 *
4393
 * The final value programmed to the register should always be rounded up,
4394
 * and include an extra 2 entries to account for clock crossings.
4395
 *
4396
 * We don't use the sprite, so we can ignore that.  And on Crestline we have
4397
 * to set the non-SR watermarks to 8.
4398
 */
4399
static void intel_update_watermarks(struct drm_device *dev)
4400
{
4401
	struct drm_i915_private *dev_priv = dev->dev_private;
4402
 
4403
	if (dev_priv->display.update_wm)
4404
		dev_priv->display.update_wm(dev);
4405
}
4406
 
4407
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4408
{
4409
	return dev_priv->lvds_use_ssc && i915_panel_use_ssc
4410
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4411
}
4412
 
4413
/**
 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
 * @crtc: CRTC structure
 *
 * A pipe may be connected to one or more outputs.  Based on the depth of the
 * attached framebuffer, choose a good color depth to use on the pipe.
 *
 * If possible, match the pipe depth to the fb depth.  In some cases, this
 * isn't ideal, because the connected output supports a lesser or restricted
 * set of depths.  Resolve that here:
 *    LVDS typically supports only 6bpc, so clamp down in that case
 *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
 *    Displays may support a restricted set as well, check EDID and clamp as
 *      appropriate.
 *
 * RETURNS:
 * Dithering requirement (i.e. false if display bpc and pipe bpc match,
 * true if they don't match).
 */
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
					 unsigned int *pipe_bpp)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	unsigned int display_bpc = UINT_MAX, bpc;

	/* Walk the encoders & connectors on this crtc, get min bpc */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		if (encoder->crtc != crtc)
			continue;

		if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
			unsigned int lvds_bpc;

			/* A3 power state up => 24-bit panel, else 18-bit */
			if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
			    LVDS_A3_POWER_UP)
				lvds_bpc = 8;
			else
				lvds_bpc = 6;

			if (lvds_bpc < display_bpc) {
				DRM_DEBUG_DRIVER("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
				display_bpc = lvds_bpc;
			}
			continue;
		}

		if (intel_encoder->type == INTEL_OUTPUT_EDP) {
			/* Use VBT settings if we have an eDP panel */
			unsigned int edp_bpc = dev_priv->edp.bpp / 3;

			if (edp_bpc < display_bpc) {
				DRM_DEBUG_DRIVER("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
				display_bpc = edp_bpc;
			}
			continue;
		}

		/* Not one of the known troublemakers, check the EDID */
		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    head) {
			if (connector->encoder != encoder)
				continue;

			/* Don't use an invalid EDID bpc value */
			if (connector->display_info.bpc &&
			    connector->display_info.bpc < display_bpc) {
				DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
				display_bpc = connector->display_info.bpc;
			}
		}

		/*
		 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
		 * through, clamp it down.  (Note: >12bpc will be caught below.)
		 * NOTE(review): 9-11bpc is rounded UP to 12 here, relying on
		 * HDMI sinks supporting 12bpc whenever they report >8 — confirm.
		 */
		if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
			if (display_bpc > 8 && display_bpc < 12) {
				DRM_DEBUG_DRIVER("forcing bpc to 12 for HDMI\n");
				display_bpc = 12;
			} else {
				DRM_DEBUG_DRIVER("forcing bpc to 8 for HDMI\n");
				display_bpc = 8;
			}
		}
	}

	/*
	 * We could just drive the pipe at the highest bpc all the time and
	 * enable dithering as needed, but that costs bandwidth.  So choose
	 * the minimum value that expresses the full color range of the fb but
	 * also stays within the max display bpc discovered above.
	 */

	switch (crtc->fb->depth) {
	case 8:
		bpc = 8; /* since we go through a colormap */
		break;
	case 15:
	case 16:
		bpc = 6; /* min is 18bpp */
		break;
	case 24:
		bpc = min((unsigned int)8, display_bpc);
		break;
	case 30:
		bpc = min((unsigned int)10, display_bpc);
		break;
	case 48:
		bpc = min((unsigned int)12, display_bpc);
		break;
	default:
		DRM_DEBUG("unsupported depth, assuming 24 bits\n");
		bpc = min((unsigned int)8, display_bpc);
		break;
	}

	DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n",
			 bpc, display_bpc);

	/* Pipe depth is per-pixel: 3 components of bpc bits each */
	*pipe_bpp = bpc * 3;

	/* Dither whenever the pipe bpc differs from the display's bpc */
	return display_bpc != bpc;
}
4541
 
4542
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4543
                  struct drm_display_mode *mode,
4544
                  struct drm_display_mode *adjusted_mode,
4545
                  int x, int y,
4546
                  struct drm_framebuffer *old_fb)
4547
{
4548
    struct drm_device *dev = crtc->dev;
4549
    struct drm_i915_private *dev_priv = dev->dev_private;
4550
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4551
    int pipe = intel_crtc->pipe;
4552
    int plane = intel_crtc->plane;
4553
    int refclk, num_connectors = 0;
4554
    intel_clock_t clock, reduced_clock;
4555
    u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
4556
    bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
4557
    bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
4558
    struct drm_mode_config *mode_config = &dev->mode_config;
4559
    struct intel_encoder *encoder;
4560
    const intel_limit_t *limit;
4561
    int ret;
4562
    u32 temp;
4563
    u32 lvds_sync = 0;
4564
 
4565
    list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4566
        if (encoder->base.crtc != crtc)
4567
            continue;
4568
 
4569
        switch (encoder->type) {
4570
        case INTEL_OUTPUT_LVDS:
4571
            is_lvds = true;
4572
            break;
4573
        case INTEL_OUTPUT_SDVO:
4574
        case INTEL_OUTPUT_HDMI:
4575
            is_sdvo = true;
4576
            if (encoder->needs_tv_clock)
4577
                is_tv = true;
4578
            break;
4579
        case INTEL_OUTPUT_DVO:
4580
            is_dvo = true;
4581
            break;
4582
        case INTEL_OUTPUT_TVOUT:
4583
            is_tv = true;
4584
            break;
4585
        case INTEL_OUTPUT_ANALOG:
4586
            is_crt = true;
4587
            break;
4588
        case INTEL_OUTPUT_DISPLAYPORT:
4589
            is_dp = true;
4590
            break;
4591
        }
4592
 
4593
        num_connectors++;
4594
    }
4595
 
4596
    if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4597
        refclk = dev_priv->lvds_ssc_freq * 1000;
4598
        DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
4599
                  refclk / 1000);
4600
    } else if (!IS_GEN2(dev)) {
4601
        refclk = 96000;
4602
    } else {
4603
        refclk = 48000;
4604
    }
4605
 
4606
    /*
4607
     * Returns a set of divisors for the desired target clock with the given
4608
     * refclk, or FALSE.  The returned values represent the clock equation:
4609
     * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
4610
     */
4611
    limit = intel_limit(crtc, refclk);
4612
    ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
4613
    if (!ok) {
4614
        DRM_ERROR("Couldn't find PLL settings for mode!\n");
4615
        return -EINVAL;
4616
    }
4617
 
4618
    /* Ensure that the cursor is valid for the new mode before changing... */
4619
//    intel_crtc_update_cursor(crtc, true);
4620
 
4621
    if (is_lvds && dev_priv->lvds_downclock_avail) {
4622
        has_reduced_clock = limit->find_pll(limit, crtc,
4623
                            dev_priv->lvds_downclock,
4624
                            refclk,
4625
                            &reduced_clock);
4626
        if (has_reduced_clock && (clock.p != reduced_clock.p)) {
4627
            /*
4628
             * If the different P is found, it means that we can't
4629
             * switch the display clock by using the FP0/FP1.
4630
             * In such case we will disable the LVDS downclock
4631
             * feature.
4632
             */
4633
            DRM_DEBUG_KMS("Different P is found for "
4634
                      "LVDS clock/downclock\n");
4635
            has_reduced_clock = 0;
4636
        }
4637
    }
4638
    /* SDVO TV has fixed PLL values depend on its clock range,
4639
       this mirrors vbios setting. */
4640
    if (is_sdvo && is_tv) {
4641
        if (adjusted_mode->clock >= 100000
4642
            && adjusted_mode->clock < 140500) {
4643
            clock.p1 = 2;
4644
            clock.p2 = 10;
4645
            clock.n = 3;
4646
            clock.m1 = 16;
4647
            clock.m2 = 8;
4648
        } else if (adjusted_mode->clock >= 140500
4649
               && adjusted_mode->clock <= 200000) {
4650
            clock.p1 = 1;
4651
            clock.p2 = 10;
4652
            clock.n = 6;
4653
            clock.m1 = 12;
4654
            clock.m2 = 8;
4655
        }
4656
    }
4657
 
4658
    if (IS_PINEVIEW(dev)) {
4659
        fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
4660
        if (has_reduced_clock)
4661
            fp2 = (1 << reduced_clock.n) << 16 |
4662
                reduced_clock.m1 << 8 | reduced_clock.m2;
4663
    } else {
4664
        fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
4665
        if (has_reduced_clock)
4666
            fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
4667
                reduced_clock.m2;
4668
    }
4669
 
4670
    dpll = DPLL_VGA_MODE_DIS;
4671
 
4672
    if (!IS_GEN2(dev)) {
4673
        if (is_lvds)
4674
            dpll |= DPLLB_MODE_LVDS;
4675
        else
4676
            dpll |= DPLLB_MODE_DAC_SERIAL;
4677
        if (is_sdvo) {
4678
            int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4679
            if (pixel_multiplier > 1) {
4680
                if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4681
                    dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
4682
            }
4683
            dpll |= DPLL_DVO_HIGH_SPEED;
4684
        }
4685
        if (is_dp)
4686
            dpll |= DPLL_DVO_HIGH_SPEED;
4687
 
4688
        /* compute bitmask from p1 value */
4689
        if (IS_PINEVIEW(dev))
4690
            dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
4691
        else {
4692
            dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4693
            if (IS_G4X(dev) && has_reduced_clock)
4694
                dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
4695
        }
4696
        switch (clock.p2) {
4697
        case 5:
4698
            dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
4699
            break;
4700
        case 7:
4701
            dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
4702
            break;
4703
        case 10:
4704
            dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
4705
            break;
4706
        case 14:
4707
            dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
4708
            break;
4709
        }
4710
        if (INTEL_INFO(dev)->gen >= 4)
4711
            dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
4712
    } else {
4713
        if (is_lvds) {
4714
            dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4715
        } else {
4716
            if (clock.p1 == 2)
4717
                dpll |= PLL_P1_DIVIDE_BY_TWO;
4718
            else
4719
                dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4720
            if (clock.p2 == 4)
4721
                dpll |= PLL_P2_DIVIDE_BY_4;
4722
        }
4723
    }
4724
 
4725
    if (is_sdvo && is_tv)
4726
        dpll |= PLL_REF_INPUT_TVCLKINBC;
4727
    else if (is_tv)
4728
        /* XXX: just matching BIOS for now */
4729
        /*  dpll |= PLL_REF_INPUT_TVCLKINBC; */
4730
        dpll |= 3;
4731
    else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4732
        dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4733
    else
4734
        dpll |= PLL_REF_INPUT_DREFCLK;
4735
 
4736
    /* setup pipeconf */
4737
    pipeconf = I915_READ(PIPECONF(pipe));
4738
 
4739
    /* Set up the display plane register */
4740
    dspcntr = DISPPLANE_GAMMA_ENABLE;
4741
 
4742
    /* Ironlake's plane is forced to pipe, bit 24 is to
4743
       enable color space conversion */
4744
    if (pipe == 0)
4745
        dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
4746
    else
4747
        dspcntr |= DISPPLANE_SEL_PIPE_B;
4748
 
4749
    if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
4750
        /* Enable pixel doubling when the dot clock is > 90% of the (display)
4751
         * core speed.
4752
         *
4753
         * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
4754
         * pipe == 0 check?
4755
         */
4756
        if (mode->clock >
4757
            dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
4758
            pipeconf |= PIPECONF_DOUBLE_WIDE;
4759
        else
4760
            pipeconf &= ~PIPECONF_DOUBLE_WIDE;
4761
    }
4762
 
4763
    dpll |= DPLL_VCO_ENABLE;
4764
 
4765
    DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
4766
    drm_mode_debug_printmodeline(mode);
4767
 
4768
    I915_WRITE(FP0(pipe), fp);
4769
    I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
4770
 
4771
    POSTING_READ(DPLL(pipe));
4772
    udelay(150);
4773
 
4774
    /* The LVDS pin pair needs to be on before the DPLLs are enabled.
4775
     * This is an exception to the general rule that mode_set doesn't turn
4776
     * things on.
4777
     */
4778
    if (is_lvds) {
4779
        temp = I915_READ(LVDS);
4780
        temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
4781
        if (pipe == 1) {
4782
            temp |= LVDS_PIPEB_SELECT;
4783
        } else {
4784
            temp &= ~LVDS_PIPEB_SELECT;
4785
        }
4786
        /* set the corresponsding LVDS_BORDER bit */
4787
        temp |= dev_priv->lvds_border_bits;
4788
        /* Set the B0-B3 data pairs corresponding to whether we're going to
4789
         * set the DPLLs for dual-channel mode or not.
4790
         */
4791
        if (clock.p2 == 7)
4792
            temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
4793
        else
4794
            temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
4795
 
4796
        /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
4797
         * appropriately here, but we need to look more thoroughly into how
4798
         * panels behave in the two modes.
4799
         */
4800
        /* set the dithering flag on LVDS as needed */
4801
        if (INTEL_INFO(dev)->gen >= 4) {
4802
            if (dev_priv->lvds_dither)
4803
                temp |= LVDS_ENABLE_DITHER;
4804
            else
4805
                temp &= ~LVDS_ENABLE_DITHER;
4806
        }
4807
        if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
4808
            lvds_sync |= LVDS_HSYNC_POLARITY;
4809
        if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
4810
            lvds_sync |= LVDS_VSYNC_POLARITY;
4811
        if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
4812
            != lvds_sync) {
4813
            char flags[2] = "-+";
4814
            DRM_INFO("Changing LVDS panel from "
4815
                 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
4816
                 flags[!(temp & LVDS_HSYNC_POLARITY)],
4817
                 flags[!(temp & LVDS_VSYNC_POLARITY)],
4818
                 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
4819
                 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
4820
            temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
4821
            temp |= lvds_sync;
4822
        }
4823
        I915_WRITE(LVDS, temp);
4824
    }
4825
 
4826
    if (is_dp) {
4827
        intel_dp_set_m_n(crtc, mode, adjusted_mode);
4828
    }
4829
 
4830
    I915_WRITE(DPLL(pipe), dpll);
4831
 
4832
    /* Wait for the clocks to stabilize. */
4833
    POSTING_READ(DPLL(pipe));
4834
    udelay(150);
4835
 
4836
    if (INTEL_INFO(dev)->gen >= 4) {
4837
        temp = 0;
4838
        if (is_sdvo) {
4839
            temp = intel_mode_get_pixel_multiplier(adjusted_mode);
4840
            if (temp > 1)
4841
                temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
4842
            else
4843
                temp = 0;
4844
        }
4845
        I915_WRITE(DPLL_MD(pipe), temp);
4846
    } else {
4847
        /* The pixel multiplier can only be updated once the
4848
         * DPLL is enabled and the clocks are stable.
4849
         *
4850
         * So write it again.
4851
         */
4852
        I915_WRITE(DPLL(pipe), dpll);
4853
    }
4854
 
4855
    intel_crtc->lowfreq_avail = false;
4856
    if (is_lvds && has_reduced_clock && i915_powersave) {
4857
        I915_WRITE(FP1(pipe), fp2);
4858
        intel_crtc->lowfreq_avail = true;
4859
        if (HAS_PIPE_CXSR(dev)) {
4860
            DRM_DEBUG_KMS("enabling CxSR downclocking\n");
4861
            pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
4862
        }
4863
    } else {
4864
        I915_WRITE(FP1(pipe), fp);
4865
        if (HAS_PIPE_CXSR(dev)) {
4866
            DRM_DEBUG_KMS("disabling CxSR downclocking\n");
4867
            pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
4868
        }
4869
    }
4870
 
4871
    if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
4872
        pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
4873
        /* the chip adds 2 halflines automatically */
4874
        adjusted_mode->crtc_vdisplay -= 1;
4875
        adjusted_mode->crtc_vtotal -= 1;
4876
        adjusted_mode->crtc_vblank_start -= 1;
4877
        adjusted_mode->crtc_vblank_end -= 1;
4878
        adjusted_mode->crtc_vsync_end -= 1;
4879
        adjusted_mode->crtc_vsync_start -= 1;
4880
    } else
4881
        pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
4882
 
4883
    I915_WRITE(HTOTAL(pipe),
4884
           (adjusted_mode->crtc_hdisplay - 1) |
4885
           ((adjusted_mode->crtc_htotal - 1) << 16));
4886
    I915_WRITE(HBLANK(pipe),
4887
           (adjusted_mode->crtc_hblank_start - 1) |
4888
           ((adjusted_mode->crtc_hblank_end - 1) << 16));
4889
    I915_WRITE(HSYNC(pipe),
4890
           (adjusted_mode->crtc_hsync_start - 1) |
4891
           ((adjusted_mode->crtc_hsync_end - 1) << 16));
4892
 
4893
    I915_WRITE(VTOTAL(pipe),
4894
           (adjusted_mode->crtc_vdisplay - 1) |
4895
           ((adjusted_mode->crtc_vtotal - 1) << 16));
4896
    I915_WRITE(VBLANK(pipe),
4897
           (adjusted_mode->crtc_vblank_start - 1) |
4898
           ((adjusted_mode->crtc_vblank_end - 1) << 16));
4899
    I915_WRITE(VSYNC(pipe),
4900
           (adjusted_mode->crtc_vsync_start - 1) |
4901
           ((adjusted_mode->crtc_vsync_end - 1) << 16));
4902
 
4903
    /* pipesrc and dspsize control the size that is scaled from,
4904
     * which should always be the user's requested size.
4905
     */
4906
    I915_WRITE(DSPSIZE(plane),
4907
           ((mode->vdisplay - 1) << 16) |
4908
           (mode->hdisplay - 1));
4909
    I915_WRITE(DSPPOS(plane), 0);
4910
    I915_WRITE(PIPESRC(pipe),
4911
           ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
4912
 
4913
    I915_WRITE(PIPECONF(pipe), pipeconf);
4914
    POSTING_READ(PIPECONF(pipe));
4915
    intel_enable_pipe(dev_priv, pipe, false);
4916
 
4917
    intel_wait_for_vblank(dev, pipe);
4918
 
4919
    I915_WRITE(DSPCNTR(plane), dspcntr);
4920
    POSTING_READ(DSPCNTR(plane));
4921
    intel_enable_plane(dev_priv, plane, pipe);
4922
 
4923
    ret = intel_pipe_set_base(crtc, x, y, old_fb);
4924
 
4925
    intel_update_watermarks(dev);
4926
 
4927
    return ret;
4928
}
4929
 
4930
/* Program the PCH display reference clock (PCH_DREF_CONTROL) ahead of
 * DPLL enabling: always enable the non-spread source, enable the SSC
 * source, and — if an eDP/LVDS encoder is active — turn on SSC1 and
 * route the CPU source output (downspread when the panel uses SSC).
 *
 * Only effective from PCH B stepping onward; earlier steppings ignore
 * these bits (see comment below).  Each write is flushed with a posting
 * read followed by a 200us settle delay.
 */
static void ironlake_update_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_encoder *has_edp_encoder = NULL;
	u32 temp;
	bool has_lvds = false;	/* NOTE(review): set below but never read here */

	/* We need to take the global config into account */
	list_for_each_entry(crtc, &mode_config->crtc_list, head) {
		if (!crtc->enabled)
			continue;

		list_for_each_entry(encoder, &mode_config->encoder_list,
				    base.head) {
			if (encoder->base.crtc != crtc)
				continue;

			switch (encoder->type) {
			case INTEL_OUTPUT_LVDS:
				has_lvds = true;
				/* fallthrough — NOTE(review): an LVDS encoder
				 * also lands in has_edp_encoder; this matches
				 * the upstream driver of this era, but confirm
				 * it is intentional. */
			case INTEL_OUTPUT_EDP:
				has_edp_encoder = encoder;
				break;
			}
		}
	}

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	temp = I915_READ(PCH_DREF_CONTROL);
	/* Always enable nonspread source */
	temp &= ~DREF_NONSPREAD_SOURCE_MASK;
	temp |= DREF_NONSPREAD_SOURCE_ENABLE;
	temp &= ~DREF_SSC_SOURCE_MASK;
	temp |= DREF_SSC_SOURCE_ENABLE;
	I915_WRITE(PCH_DREF_CONTROL, temp);

	/* Posting read + delay lets the reference settle before more writes. */
	POSTING_READ(PCH_DREF_CONTROL);
	udelay(200);

	if (has_edp_encoder) {
		if (intel_panel_use_ssc(dev_priv)) {
			temp |= DREF_SSC1_ENABLE;
			I915_WRITE(PCH_DREF_CONTROL, temp);

			POSTING_READ(PCH_DREF_CONTROL);
			udelay(200);
		}
		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
			if (intel_panel_use_ssc(dev_priv))
				temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else {
			/* Enable SSC on PCH eDP if needed */
			if (intel_panel_use_ssc(dev_priv)) {
				DRM_ERROR("enabling SSC on PCH\n");
				temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
			}
		}
		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	}
}
5004
 
5005
/* Full mode-set for one CRTC on Ironlake/PCH-split hardware.
 *
 * Walks the encoders attached to @crtc to classify the output type,
 * picks a reference clock, computes PLL divisors (plus an optional LVDS
 * downclock), sizes the FDI link, programs the PCH refclk, then writes
 * the FP/DPLL, LVDS, M/N, timing, and PIPECONF registers in a strictly
 * ordered sequence.  Finishes by setting the framebuffer base and
 * updating watermarks.
 *
 * @crtc:          CRTC being configured
 * @mode:          the user-requested mode (scaled-from size)
 * @adjusted_mode: the mode actually driven to the hardware
 * @x, @y:         framebuffer scan-out origin
 * @old_fb:        previous framebuffer, passed to intel_pipe_set_base()
 *
 * Returns 0 on success or a negative errno (-EINVAL if no PLL divisors
 * satisfy the requested clock; otherwise whatever intel_pipe_set_base()
 * returns).  Note: on interlaced modes the crtc_* vertical timings in
 * @adjusted_mode are decremented in place (the chip adds 2 halflines).
 */
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
                  struct drm_display_mode *mode,
                  struct drm_display_mode *adjusted_mode,
                  int x, int y,
                  struct drm_framebuffer *old_fb)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;
    int refclk, num_connectors = 0;
    intel_clock_t clock, reduced_clock;
    u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
    bool ok, has_reduced_clock = false, is_sdvo = false;
    /* NOTE(review): is_crt is set below but never read in this function. */
    bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
    struct intel_encoder *has_edp_encoder = NULL;
    struct drm_mode_config *mode_config = &dev->mode_config;
    struct intel_encoder *encoder;
    const intel_limit_t *limit;
    int ret;
    struct fdi_m_n m_n = {0};
    u32 temp;
    u32 lvds_sync = 0;
    int target_clock, pixel_multiplier, lane, link_bw, factor;
    unsigned int pipe_bpp;
    bool dither;

    /* Classify every encoder driven by this CRTC. */
    list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
        if (encoder->base.crtc != crtc)
            continue;

        switch (encoder->type) {
        case INTEL_OUTPUT_LVDS:
            is_lvds = true;
            break;
        case INTEL_OUTPUT_SDVO:
        case INTEL_OUTPUT_HDMI:
            is_sdvo = true;
            if (encoder->needs_tv_clock)
                is_tv = true;
            break;
        case INTEL_OUTPUT_TVOUT:
            is_tv = true;
            break;
        case INTEL_OUTPUT_ANALOG:
            is_crt = true;
            break;
        case INTEL_OUTPUT_DISPLAYPORT:
            is_dp = true;
            break;
        case INTEL_OUTPUT_EDP:
            has_edp_encoder = encoder;
            break;
        }

        num_connectors++;
    }

    /* Reference clock: panel SSC for a lone LVDS, else 96/120 MHz. */
    if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
        refclk = dev_priv->lvds_ssc_freq * 1000;
        DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
                  refclk / 1000);
    } else {
        refclk = 96000;
        if (!has_edp_encoder ||
            intel_encoder_is_pch_edp(&has_edp_encoder->base))
            refclk = 120000; /* 120Mhz refclk */
    }

    /*
     * Returns a set of divisors for the desired target clock with the given
     * refclk, or FALSE.  The returned values represent the clock equation:
     * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
     */
    limit = intel_limit(crtc, refclk);
    ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
    if (!ok) {
        DRM_ERROR("Couldn't find PLL settings for mode!\n");
        return -EINVAL;
    }

    /* Ensure that the cursor is valid for the new mode before changing... */
    /* (cursor update disabled in this port) */
//    intel_crtc_update_cursor(crtc, true);

    /* Try to find divisors for the LVDS downclock (power saving). */
    if (is_lvds && dev_priv->lvds_downclock_avail) {
        has_reduced_clock = limit->find_pll(limit, crtc,
                            dev_priv->lvds_downclock,
                            refclk,
                            &reduced_clock);
        if (has_reduced_clock && (clock.p != reduced_clock.p)) {
            /*
             * If the different P is found, it means that we can't
             * switch the display clock by using the FP0/FP1.
             * In such case we will disable the LVDS downclock
             * feature.
             */
            DRM_DEBUG_KMS("Different P is found for "
                      "LVDS clock/downclock\n");
            has_reduced_clock = 0;
        }
    }
    /* SDVO TV has fixed PLL values depend on its clock range,
       this mirrors vbios setting. */
    if (is_sdvo && is_tv) {
        if (adjusted_mode->clock >= 100000
            && adjusted_mode->clock < 140500) {
            clock.p1 = 2;
            clock.p2 = 10;
            clock.n = 3;
            clock.m1 = 16;
            clock.m2 = 8;
        } else if (adjusted_mode->clock >= 140500
               && adjusted_mode->clock <= 200000) {
            clock.p1 = 1;
            clock.p2 = 10;
            clock.n = 6;
            clock.m1 = 12;
            clock.m2 = 8;
        }
    }

    /* FDI link */
    pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
    lane = 0;
    /* CPU eDP doesn't require FDI link, so just set DP M/N
       according to current link config */
    if (has_edp_encoder &&
        !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
        target_clock = mode->clock;
        intel_edp_link_config(has_edp_encoder,
                      &lane, &link_bw);
    } else {
        /* [e]DP over FDI requires target mode clock
           instead of link clock */
        /* NOTE(review): if has_edp_encoder is NULL and is_dp is false,
         * intel_encoder_is_pch_edp(&has_edp_encoder->base) is evaluated
         * on a NULL encoder — confirm the helper tolerates that. */
        if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
            target_clock = mode->clock;
        else
            target_clock = adjusted_mode->clock;

        /* FDI is a binary signal running at ~2.7GHz, encoding
         * each output octet as 10 bits. The actual frequency
         * is stored as a divider into a 100MHz clock, and the
         * mode pixel clock is stored in units of 1KHz.
         * Hence the bw of each lane in terms of the mode signal
         * is:
         */
        link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
    }

    /* determine panel color depth */
    temp = I915_READ(PIPECONF(pipe));
    temp &= ~PIPE_BPC_MASK;
    dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp);
    switch (pipe_bpp) {
    case 18:
        temp |= PIPE_6BPC;
        break;
    case 24:
        temp |= PIPE_8BPC;
        break;
    case 30:
        temp |= PIPE_10BPC;
        break;
    case 36:
        temp |= PIPE_12BPC;
        break;
    default:
        WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
            pipe_bpp);
        temp |= PIPE_8BPC;
        pipe_bpp = 24;
        break;
    }

    intel_crtc->bpp = pipe_bpp;
    I915_WRITE(PIPECONF(pipe), temp);

    if (!lane) {
        /*
         * Account for spread spectrum to avoid
         * oversubscribing the link. Max center spread
         * is 2.5%; use 5% for safety's sake.
         */
        u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
        lane = bps / (link_bw * 8) + 1;
    }

    intel_crtc->fdi_lanes = lane;

    if (pixel_multiplier > 1)
        link_bw *= pixel_multiplier;
    ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
                 &m_n);

    /* Set up the PCH reference clock before touching the DPLLs. */
    ironlake_update_pch_refclk(dev);

    /* Pack the M1/M2/N divisors into the FP register format. */
    fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
    if (has_reduced_clock)
        fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
            reduced_clock.m2;

    /* Enable autotuning of the PLL clock (if permissible) */
    factor = 21;
    if (is_lvds) {
        if ((intel_panel_use_ssc(dev_priv) &&
             dev_priv->lvds_ssc_freq == 100) ||
            (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
            factor = 25;
    } else if (is_sdvo && is_tv)
        factor = 20;

    if (clock.m < factor * clock.n)
        fp |= FP_CB_TUNE;

    /* Build the DPLL control word for this output configuration. */
    dpll = 0;

    if (is_lvds)
        dpll |= DPLLB_MODE_LVDS;
    else
        dpll |= DPLLB_MODE_DAC_SERIAL;
    if (is_sdvo) {
        int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
        if (pixel_multiplier > 1) {
            dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
        }
        dpll |= DPLL_DVO_HIGH_SPEED;
    }
    /* NOTE(review): same possible NULL has_edp_encoder dereference as above
     * when is_dp is false — confirm. */
    if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
        dpll |= DPLL_DVO_HIGH_SPEED;

    /* compute bitmask from p1 value */
    dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
    /* also FPA1 */
    dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

    switch (clock.p2) {
    case 5:
        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
        break;
    case 7:
        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
        break;
    case 10:
        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
        break;
    case 14:
        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
        break;
    }

    if (is_sdvo && is_tv)
        dpll |= PLL_REF_INPUT_TVCLKINBC;
    else if (is_tv)
        /* XXX: just matching BIOS for now */
        /*  dpll |= PLL_REF_INPUT_TVCLKINBC; */
        dpll |= 3;
    else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
        dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
    else
        dpll |= PLL_REF_INPUT_DREFCLK;

    /* setup pipeconf */
    pipeconf = I915_READ(PIPECONF(pipe));

    /* Set up the display plane register */
    dspcntr = DISPPLANE_GAMMA_ENABLE;

    DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
    drm_mode_debug_printmodeline(mode);

    /* PCH eDP needs FDI, but CPU eDP does not */
    if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
        I915_WRITE(PCH_FP0(pipe), fp);
        I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);

        POSTING_READ(PCH_DPLL(pipe));
        udelay(150);
    }

    /* enable transcoder DPLL */
    if (HAS_PCH_CPT(dev)) {
        temp = I915_READ(PCH_DPLL_SEL);
        switch (pipe) {
        case 0:
            temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL;
            break;
        case 1:
            temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL;
            break;
        case 2:
            /* FIXME: manage transcoder PLLs? */
            temp |= TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL;
            break;
        default:
            BUG();
        }
        I915_WRITE(PCH_DPLL_SEL, temp);

        POSTING_READ(PCH_DPLL_SEL);
        udelay(150);
    }

    /* The LVDS pin pair needs to be on before the DPLLs are enabled.
     * This is an exception to the general rule that mode_set doesn't turn
     * things on.
     */
    if (is_lvds) {
        temp = I915_READ(PCH_LVDS);
        temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
        if (pipe == 1) {
            if (HAS_PCH_CPT(dev))
                temp |= PORT_TRANS_B_SEL_CPT;
            else
                temp |= LVDS_PIPEB_SELECT;
        } else {
            if (HAS_PCH_CPT(dev))
                temp &= ~PORT_TRANS_SEL_MASK;
            else
                temp &= ~LVDS_PIPEB_SELECT;
        }
        /* set the corresponding LVDS_BORDER bit */
        temp |= dev_priv->lvds_border_bits;
        /* Set the B0-B3 data pairs corresponding to whether we're going to
         * set the DPLLs for dual-channel mode or not.
         */
        if (clock.p2 == 7)
            temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
        else
            temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

        /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
         * appropriately here, but we need to look more thoroughly into how
         * panels behave in the two modes.
         */
        if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
            lvds_sync |= LVDS_HSYNC_POLARITY;
        if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
            lvds_sync |= LVDS_VSYNC_POLARITY;
        if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
            != lvds_sync) {
            /* '-' = active-low (polarity bit set), '+' = active-high */
            char flags[2] = "-+";
            DRM_INFO("Changing LVDS panel from "
                 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
                 flags[!(temp & LVDS_HSYNC_POLARITY)],
                 flags[!(temp & LVDS_VSYNC_POLARITY)],
                 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
                 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
            temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
            temp |= lvds_sync;
        }
        I915_WRITE(PCH_LVDS, temp);
    }

    pipeconf &= ~PIPECONF_DITHER_EN;
    pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
    if ((is_lvds && dev_priv->lvds_dither) || dither) {
        pipeconf |= PIPECONF_DITHER_EN;
        pipeconf |= PIPECONF_DITHER_TYPE_ST1;
    }
    /* NOTE(review): possible NULL has_edp_encoder dereference when is_dp
     * is false — confirm (see FDI-link note above). */
    if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
        intel_dp_set_m_n(crtc, mode, adjusted_mode);
    } else {
        /* For non-DP output, clear any trans DP clock recovery setting.*/
        I915_WRITE(TRANSDATA_M1(pipe), 0);
        I915_WRITE(TRANSDATA_N1(pipe), 0);
        I915_WRITE(TRANSDPLINK_M1(pipe), 0);
        I915_WRITE(TRANSDPLINK_N1(pipe), 0);
    }

    if (!has_edp_encoder ||
        intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
        I915_WRITE(PCH_DPLL(pipe), dpll);

        /* Wait for the clocks to stabilize. */
        POSTING_READ(PCH_DPLL(pipe));
        udelay(150);

        /* The pixel multiplier can only be updated once the
         * DPLL is enabled and the clocks are stable.
         *
         * So write it again.
         */
        I915_WRITE(PCH_DPLL(pipe), dpll);
    }

    /* FP1 gets the downclocked divisors when available, FP0's otherwise. */
    intel_crtc->lowfreq_avail = false;
    if (is_lvds && has_reduced_clock && i915_powersave) {
        I915_WRITE(PCH_FP1(pipe), fp2);
        intel_crtc->lowfreq_avail = true;
        if (HAS_PIPE_CXSR(dev)) {
            DRM_DEBUG_KMS("enabling CxSR downclocking\n");
            pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
        }
    } else {
        I915_WRITE(PCH_FP1(pipe), fp);
        if (HAS_PIPE_CXSR(dev)) {
            DRM_DEBUG_KMS("disabling CxSR downclocking\n");
            pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
        }
    }

    if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
        pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
        /* the chip adds 2 halflines automatically */
        adjusted_mode->crtc_vdisplay -= 1;
        adjusted_mode->crtc_vtotal -= 1;
        adjusted_mode->crtc_vblank_start -= 1;
        adjusted_mode->crtc_vblank_end -= 1;
        adjusted_mode->crtc_vsync_end -= 1;
        adjusted_mode->crtc_vsync_start -= 1;
    } else
        pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */

    /* Pipe timing registers: low half = active/start, high = total/end. */
    I915_WRITE(HTOTAL(pipe),
           (adjusted_mode->crtc_hdisplay - 1) |
           ((adjusted_mode->crtc_htotal - 1) << 16));
    I915_WRITE(HBLANK(pipe),
           (adjusted_mode->crtc_hblank_start - 1) |
           ((adjusted_mode->crtc_hblank_end - 1) << 16));
    I915_WRITE(HSYNC(pipe),
           (adjusted_mode->crtc_hsync_start - 1) |
           ((adjusted_mode->crtc_hsync_end - 1) << 16));

    I915_WRITE(VTOTAL(pipe),
           (adjusted_mode->crtc_vdisplay - 1) |
           ((adjusted_mode->crtc_vtotal - 1) << 16));
    I915_WRITE(VBLANK(pipe),
           (adjusted_mode->crtc_vblank_start - 1) |
           ((adjusted_mode->crtc_vblank_end - 1) << 16));
    I915_WRITE(VSYNC(pipe),
           (adjusted_mode->crtc_vsync_start - 1) |
           ((adjusted_mode->crtc_vsync_end - 1) << 16));

    /* pipesrc controls the size that is scaled from, which should
     * always be the user's requested size.
     */
    I915_WRITE(PIPESRC(pipe),
           ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));

    I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
    I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
    I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
    I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);

    if (has_edp_encoder &&
        !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
        ironlake_set_pll_edp(crtc, adjusted_mode->clock);
    }

    I915_WRITE(PIPECONF(pipe), pipeconf);
    POSTING_READ(PIPECONF(pipe));

    intel_wait_for_vblank(dev, pipe);

    if (IS_GEN5(dev)) {
        /* enable address swizzle for tiling buffer */
        temp = I915_READ(DISP_ARB_CTL);
        I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
    }

    I915_WRITE(DSPCNTR(plane), dspcntr);
    POSTING_READ(DSPCNTR(plane));

    ret = intel_pipe_set_base(crtc, x, y, old_fb);

    intel_update_watermarks(dev);

    return ret;
}
5475
 
5476
 
5477
 
5478
 
5479
 
5480
 
5481
 
5482
 
5483
/** Loads the palette/gamma unit for the CRTC with the prepared values */
5484
void intel_crtc_load_lut(struct drm_crtc *crtc)
5485
{
5486
	struct drm_device *dev = crtc->dev;
5487
	struct drm_i915_private *dev_priv = dev->dev_private;
5488
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5489
	int palreg = PALETTE(intel_crtc->pipe);
5490
	int i;
5491
 
5492
	/* The clocks have to be on to load the palette. */
5493
	if (!crtc->enabled)
5494
		return;
5495
 
5496
	/* use legacy palette for Ironlake */
5497
	if (HAS_PCH_SPLIT(dev))
5498
		palreg = LGC_PALETTE(intel_crtc->pipe);
5499
 
5500
	for (i = 0; i < 256; i++) {
5501
		I915_WRITE(palreg + 4 * i,
5502
			   (intel_crtc->lut_r[i] << 16) |
5503
			   (intel_crtc->lut_g[i] << 8) |
5504
			   intel_crtc->lut_b[i]);
5505
	}
5506
}
5507
 
5508
 
5509
 
5510
 
5511
 
5512
 
5513
 
5514
 
5515
 
5516
 
5517
 
5518
 
5519
 
5520
 
5521
 
5522
 
5523
 
5524
 
5525
 
5526
 
5527
 
5528
 
5529
 
5530
 
5531
 
5532
 
5533
 
5534
 
5535
 
5536
 
5537
 
5538
 
5539
 
5540
 
5541
 
5542
 
5543
 
5544
 
5545
 
5546
 
5547
 
5548
 
5549
 
5550
 
5551
 
5552
 
5553
 
5554
 
5555
 
5556
 
5557
 
5558
 
5559
 
5560
 
5561
 
5562
 
5563
 
5564
 
5565
 
5566
 
5567
 
5568
 
5569
 
5570
 
5571
 
5572
 
5573
 
5574
 
5575
 
5576
 
5577
 
5578
 
5579
 
5580
 
5581
 
5582
 
5583
 
5584
 
5585
 
5586
 
5587
 
5588
 
5589
 
5590
 
5591
 
5592
 
5593
 
5594
 
5595
 
5596
 
5597
 
5598
 
5599
 
5600
 
5601
 
5602
 
5603
 
5604
 
5605
 
5606
static void intel_increase_pllclock(struct drm_crtc *crtc)
5607
{
5608
	struct drm_device *dev = crtc->dev;
5609
	drm_i915_private_t *dev_priv = dev->dev_private;
5610
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5611
	int pipe = intel_crtc->pipe;
5612
	int dpll_reg = DPLL(pipe);
5613
	int dpll;
5614
 
5615
	if (HAS_PCH_SPLIT(dev))
5616
		return;
5617
 
5618
	if (!dev_priv->lvds_downclock_avail)
5619
		return;
5620
 
5621
	dpll = I915_READ(dpll_reg);
5622
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
5623
		DRM_DEBUG_DRIVER("upclocking LVDS\n");
5624
 
5625
		/* Unlock panel regs */
5626
		I915_WRITE(PP_CONTROL,
5627
			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
5628
 
5629
		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
5630
		I915_WRITE(dpll_reg, dpll);
5631
		intel_wait_for_vblank(dev, pipe);
5632
 
5633
		dpll = I915_READ(dpll_reg);
5634
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
5635
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
5636
 
5637
		/* ...and lock them again */
5638
		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
5639
	}
5640
 
5641
	/* Schedule downclock */
5642
//	mod_timer(&intel_crtc->idle_timer, jiffies +
5643
//		  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
5644
}
5645
 
5646
 
5647
 
5648
 
5649
 
5650
 
5651
 
5652
 
5653
 
5654
 
5655
 
5656
 
5657
 
5658
 
5659
 
5660
 
5661
 
5662
 
5663
 
5664
 
5665
 
5666
 
5667
 
5668
 
5669
 
5670
 
5671
 
5672
 
5673
 
5674
 
5675
 
5676
 
5677
 
5678
 
5679
 
5680
 
5681
 
5682
 
5683
 
5684
 
5685
 
5686
 
5687
 
5688
 
5689
 
5690
 
5691
 
5692
 
5693
 
5694
 
5695
 
5696
 
5697
 
5698
 
5699
 
5700
 
5701
 
5702
 
5703
 
5704
 
5705
 
5706
 
5707
 
5708
 
5709
 
5710
 
5711
 
5712
 
5713
 
5714
 
5715
 
5716
 
5717
 
5718
 
5719
 
5720
 
5721
 
5722
 
5723
 
5724
 
5725
 
5726
 
5727
 
5728
 
5729
 
5730
 
5731
 
5732
 
5733
 
5734
 
5735
 
5736
 
5737
 
5738
 
5739
 
5740
 
5741
 
5742
 
5743
 
5744
 
5745
 
5746
 
5747
 
5748
 
5749
 
5750
 
5751
 
5752
 
5753
 
5754
 
5755
 
5756
 
5757
 
5758
 
5759
 
5760
 
5761
 
5762
 
5763
 
5764
 
5765
 
5766
 
5767
 
5768
 
5769
 
5770
 
5771
 
5772
 
5773
 
5774
 
5775
 
5776
 
5777
 
5778
 
5779
 
5780
 
5781
 
5782
 
5783
 
5784
 
5785
 
5786
 
5787
 
5788
 
5789
 
5790
 
5791
 
5792
 
5793
 
5794
 
5795
 
5796
 
5797
 
5798
/* Mode-config callbacks for the device.  Both hooks are stubbed out with
 * NULL in this port; the upstream implementations are kept in the
 * comments for reference. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = NULL /*intel_user_framebuffer_create*/,
	.output_poll_changed = NULL /*intel_fb_output_poll_changed*/,
};
5802
 
5803
 
5804
 
5805
 
5806
 
5807
 
5808
 
5809
 
5810
 
5811
 
5812
 
5813
 
5814
 
5815
 
5816
 
5817
 
5818
 
5819
 
5820
 
5821
 
5822
 
5823
 
5824
 
5825
 
5826
 
5827
 
5828
 
5829
 
5830
 
5831
 
5832
 
5833
 
5834
 
5835
 
5836
 
5837
 
5838
 
5839
 
5840
 
5841
 
5842
 
5843
 
5844
 
5845
 
5846
 
5847
 
5848
 
5849
 
5850
 
5851
 
5852
 
5853
 
5854
 
5855
 
5856
 
5857
 
5858
 
5859
 
5860
 
5861
 
5862
 
5863
 
5864
 
5865
 
5866
 
5867
 
5868
 
5869
 
5870
 
5871
 
5872
 
5873
 
5874
 
5875
 
5876
 
5877
 
5878
 
5879
 
5880
 
5881
 
5882
 
5883
 
5884
/*
 * Ironlake (gen5) clock-gating setup: disable the clock gating that
 * breaks FBC and CxSR, zero the LP watermarks, and apply the extra
 * chicken-bit settings required on mobile (Ironlake-M) parts.
 */
static void ironlake_init_clock_gating(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

    /* Required for FBC */
    dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
        DPFCRUNIT_CLOCK_GATE_DISABLE |
        DPFDUNIT_CLOCK_GATE_DISABLE;
    /* Required for CxSR */
    dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;

    I915_WRITE(PCH_3DCGDIS0,
           MARIUNIT_CLOCK_GATE_DISABLE |
           SVSMUNIT_CLOCK_GATE_DISABLE);
    I915_WRITE(PCH_3DCGDIS1,
           VFMUNIT_CLOCK_GATE_DISABLE);

    I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

    /*
     * According to the spec the following bits should be set in
     * order to enable memory self-refresh
     * The bit 22/21 of 0x42004
     * The bit 5 of 0x42020
     * The bit 15 of 0x45000
     */
    I915_WRITE(ILK_DISPLAY_CHICKEN2,
           (I915_READ(ILK_DISPLAY_CHICKEN2) |
            ILK_DPARB_GATE | ILK_VSDPFD_FULL));
    I915_WRITE(ILK_DSPCLK_GATE,
           (I915_READ(ILK_DSPCLK_GATE) |
            ILK_DPARB_CLK_GATE));
    I915_WRITE(DISP_ARB_CTL,
           (I915_READ(DISP_ARB_CTL) |
            DISP_FBC_WM_DIS));
    /* Clear all three low-power watermark registers. */
    I915_WRITE(WM3_LP_ILK, 0);
    I915_WRITE(WM2_LP_ILK, 0);
    I915_WRITE(WM1_LP_ILK, 0);

    /*
     * Based on the document from hardware guys the following bits
     * should be set unconditionally in order to enable FBC.
     * The bit 22 of 0x42000
     * The bit 22 of 0x42004
     * The bit 7,8,9 of 0x42020.
     */
    if (IS_IRONLAKE_M(dev)) {
        I915_WRITE(ILK_DISPLAY_CHICKEN1,
               I915_READ(ILK_DISPLAY_CHICKEN1) |
               ILK_FBCQ_DIS);
        I915_WRITE(ILK_DISPLAY_CHICKEN2,
               I915_READ(ILK_DISPLAY_CHICKEN2) |
               ILK_DPARB_GATE);
        I915_WRITE(ILK_DSPCLK_GATE,
               I915_READ(ILK_DSPCLK_GATE) |
               ILK_DPFC_DIS1 |
               ILK_DPFC_DIS2 |
               ILK_CLK_FBC);
    }

    I915_WRITE(ILK_DISPLAY_CHICKEN2,
           I915_READ(ILK_DISPLAY_CHICKEN2) |
           ILK_ELPIN_409_SELECT);
    /* NOTE(review): the value shifted into the high 16 bits looks like a
     * write-enable mask for the low half — confirm against the PRM. */
    I915_WRITE(_3D_CHICKEN2,
           _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
           _3D_CHICKEN2_WM_READ_PIPELINED);
}
5952
 
5953
/*
 * Sandybridge (gen6) clock-gating setup: program display clock gating
 * and the ELPIN select, zero the LP watermarks, set the chicken bits
 * needed for memory self-refresh/FBC, and disable plane trickle feed
 * on every pipe.
 */
static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* Clear all three low-power watermark registers. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE,
		   I915_READ(ILK_DSPCLK_GATE) |
		   ILK_DPARB_CLK_GATE  |
		   ILK_DPFD_CLK_GATE);

	/* Disable trickle feed and flush the change on each pipe's plane. */
	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}
5996
 
5997
/*
 * Ivybridge (gen7) clock-gating setup: program display clock gating,
 * zero the LP watermarks, set the VRHUNIT gate, and disable plane
 * trickle feed on every pipe.
 */
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	/* Clear all three low-power watermark registers. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);

	/* Disable trickle feed and flush the change on each pipe's plane. */
	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}
6018
 
6019
static void g4x_init_clock_gating(struct drm_device *dev)
6020
{
6021
    struct drm_i915_private *dev_priv = dev->dev_private;
6022
    uint32_t dspclk_gate;
6023
 
6024
    I915_WRITE(RENCLK_GATE_D1, 0);
6025
    I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
6026
           GS_UNIT_CLOCK_GATE_DISABLE |
6027
           CL_UNIT_CLOCK_GATE_DISABLE);
6028
    I915_WRITE(RAMCLK_GATE_D, 0);
6029
    dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
6030
        OVRUNIT_CLOCK_GATE_DISABLE |
6031
        OVCUNIT_CLOCK_GATE_DISABLE;
6032
    if (IS_GM45(dev))
6033
        dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
6034
    I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
6035
}
6036
 
6037
/* Crestline (965GM) clock gating: disable RCC gating, clear the other
 * gating registers, and zero DEUC with a 16-bit write. */
static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
}
6047
 
6048
/* Broadwater (965G) clock gating: disable the RCZ/RCC/RCPB/ISC/FBC
 * render-unit clock gates and clear RENCLK_GATE_D2. */
static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
}
6059
 
6060
static void gen3_init_clock_gating(struct drm_device *dev)
6061
{
6062
    struct drm_i915_private *dev_priv = dev->dev_private;
6063
    u32 dstate = I915_READ(D_STATE);
6064
 
6065
    dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
6066
        DSTATE_DOT_CLOCK_GATING;
6067
    I915_WRITE(D_STATE, dstate);
6068
}
6069
 
6070
/* i85x clock gating: disable SV unit clock gating. */
static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
}
6076
 
6077
/* i830 clock gating: disable overlay unit clock gating. */
static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
6083
 
6084
/* Ibex Peak (IBX) PCH clock-gating setup. */
static void ibx_init_clock_gating(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    /*
     * On Ibex Peak and Cougar Point, we need to disable clock
     * gating for the panel power sequencer or it will fail to
     * start up when no ports are active.
     */
    I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
6095
 
6096
/* Cougar Point (CPT) PCH clock-gating setup. */
static void cpt_init_clock_gating(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    int pipe;

    /*
     * On Ibex Peak and Cougar Point, we need to disable clock
     * gating for the panel power sequencer or it will fail to
     * start up when no ports are active.
     */
    I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
    I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
           DPLS_EDP_PPS_FIX_DIS);
    /* Without this, mode sets may fail silently on FDI */
    for_each_pipe(pipe)
        I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
}
6113
 
6114
 
6115
 
6116
 
6117
/* Set up chip specific display functions */
6118
/*
 * Populate dev_priv->display with the chip-specific function pointers:
 * DPMS/mode-set/plane hooks, FBC hooks, display clock speed getter,
 * watermark/FDI/clock-gating hooks, and (upstream only, compiled out
 * here) the page-flip queueing hook.
 */
static void intel_init_display(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    /* We always want a DPMS function */
    if (HAS_PCH_SPLIT(dev)) {
        dev_priv->display.dpms = ironlake_crtc_dpms;
        dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
        dev_priv->display.update_plane = ironlake_update_plane;
    } else {
        dev_priv->display.dpms = i9xx_crtc_dpms;
        dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
        dev_priv->display.update_plane = i9xx_update_plane;
    }

    /* FBC hooks: PCH-split parts, GM45, and Crestline have FBC support;
     * other I915_HAS_FBC chips fall through with no hooks installed. */
    if (I915_HAS_FBC(dev)) {
        if (HAS_PCH_SPLIT(dev)) {
            dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
            dev_priv->display.enable_fbc = ironlake_enable_fbc;
            dev_priv->display.disable_fbc = ironlake_disable_fbc;
        } else if (IS_GM45(dev)) {
            dev_priv->display.fbc_enabled = g4x_fbc_enabled;
            dev_priv->display.enable_fbc = g4x_enable_fbc;
            dev_priv->display.disable_fbc = g4x_disable_fbc;
        } else if (IS_CRESTLINE(dev)) {
            dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
            dev_priv->display.enable_fbc = i8xx_enable_fbc;
            dev_priv->display.disable_fbc = i8xx_disable_fbc;
        }
        /* 855GM needs testing */
    }

    /* Returns the core display clock speed */
    if (IS_I945G(dev) || (IS_G33(dev) && ! IS_PINEVIEW_M(dev)))
        dev_priv->display.get_display_clock_speed =
            i945_get_display_clock_speed;
    else if (IS_I915G(dev))
        dev_priv->display.get_display_clock_speed =
            i915_get_display_clock_speed;
    else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
        dev_priv->display.get_display_clock_speed =
            i9xx_misc_get_display_clock_speed;
    else if (IS_I915GM(dev))
        dev_priv->display.get_display_clock_speed =
            i915gm_get_display_clock_speed;
    else if (IS_I865G(dev))
        dev_priv->display.get_display_clock_speed =
            i865_get_display_clock_speed;
    else if (IS_I85X(dev))
        dev_priv->display.get_display_clock_speed =
            i855_get_display_clock_speed;
    else /* 852, 830 */
        dev_priv->display.get_display_clock_speed =
            i830_get_display_clock_speed;

    /* For FIFO watermark updates */
    if (HAS_PCH_SPLIT(dev)) {
        if (HAS_PCH_IBX(dev))
            dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
        else if (HAS_PCH_CPT(dev))
            dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;

        if (IS_GEN5(dev)) {
            /* Only enable watermark updates when the latency field reads
             * back non-zero; otherwise leave update_wm unset. */
            if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
                dev_priv->display.update_wm = ironlake_update_wm;
            else {
                DRM_DEBUG_KMS("Failed to get proper latency. "
                          "Disable CxSR\n");
                dev_priv->display.update_wm = NULL;
            }
            dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
            dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
        } else if (IS_GEN6(dev)) {
            if (SNB_READ_WM0_LATENCY()) {
                dev_priv->display.update_wm = sandybridge_update_wm;
            } else {
                DRM_DEBUG_KMS("Failed to read display plane latency. "
                          "Disable CxSR\n");
                dev_priv->display.update_wm = NULL;
            }
            dev_priv->display.fdi_link_train = gen6_fdi_link_train;
            dev_priv->display.init_clock_gating = gen6_init_clock_gating;
        } else if (IS_IVYBRIDGE(dev)) {
            /* FIXME: detect B0+ stepping and use auto training */
            dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
            if (SNB_READ_WM0_LATENCY()) {
                dev_priv->display.update_wm = sandybridge_update_wm;
            } else {
                DRM_DEBUG_KMS("Failed to read display plane latency. "
                          "Disable CxSR\n");
                dev_priv->display.update_wm = NULL;
            }
            dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;

        } else
            dev_priv->display.update_wm = NULL;
    } else if (IS_PINEVIEW(dev)) {
        if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
                        dev_priv->is_ddr3,
                        dev_priv->fsb_freq,
                        dev_priv->mem_freq)) {
            DRM_INFO("failed to find known CxSR latency "
                 "(found ddr%s fsb freq %d, mem freq %d), "
                 "disabling CxSR\n",
                 (dev_priv->is_ddr3 == 1) ? "3": "2",
                 dev_priv->fsb_freq, dev_priv->mem_freq);
            /* Disable CxSR and never update its watermark again */
            pineview_disable_cxsr(dev);
            dev_priv->display.update_wm = NULL;
        } else
            dev_priv->display.update_wm = pineview_update_wm;
        dev_priv->display.init_clock_gating = gen3_init_clock_gating;
    } else if (IS_G4X(dev)) {
        dev_priv->display.update_wm = g4x_update_wm;
        dev_priv->display.init_clock_gating = g4x_init_clock_gating;
    } else if (IS_GEN4(dev)) {
        dev_priv->display.update_wm = i965_update_wm;
        if (IS_CRESTLINE(dev))
            dev_priv->display.init_clock_gating = crestline_init_clock_gating;
        else if (IS_BROADWATER(dev))
            dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
    } else if (IS_GEN3(dev)) {
        dev_priv->display.update_wm = i9xx_update_wm;
        dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
        dev_priv->display.init_clock_gating = gen3_init_clock_gating;
    } else if (IS_I865G(dev)) {
        dev_priv->display.update_wm = i830_update_wm;
        dev_priv->display.init_clock_gating = i85x_init_clock_gating;
        dev_priv->display.get_fifo_size = i830_get_fifo_size;
    } else if (IS_I85X(dev)) {
        dev_priv->display.update_wm = i9xx_update_wm;
        dev_priv->display.get_fifo_size = i85x_get_fifo_size;
        dev_priv->display.init_clock_gating = i85x_init_clock_gating;
    } else {
        dev_priv->display.update_wm = i830_update_wm;
        dev_priv->display.init_clock_gating = i830_init_clock_gating;
        if (IS_845G(dev))
            dev_priv->display.get_fifo_size = i845_get_fifo_size;
        else
            dev_priv->display.get_fifo_size = i830_get_fifo_size;
    }

    /* Default just returns -ENODEV to indicate unsupported */
//    dev_priv->display.queue_flip = intel_default_queue_flip;

    /* Page-flip queueing is not supported in this port yet. */
#if 0
    switch (INTEL_INFO(dev)->gen) {
    case 2:
        dev_priv->display.queue_flip = intel_gen2_queue_flip;
        break;

    case 3:
        dev_priv->display.queue_flip = intel_gen3_queue_flip;
        break;

    case 4:
    case 5:
        dev_priv->display.queue_flip = intel_gen4_queue_flip;
        break;

    case 6:
        dev_priv->display.queue_flip = intel_gen6_queue_flip;
        break;
    case 7:
        dev_priv->display.queue_flip = intel_gen7_queue_flip;
        break;
    }
#endif
}
6287
 
6288
/*
6289
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
6290
 * resume, or other times.  This quirk makes sure that's the case for
6291
 * affected systems.
6292
 */
6293
static void quirk_pipea_force (struct drm_device *dev)
6294
{
6295
    struct drm_i915_private *dev_priv = dev->dev_private;
6296
 
6297
    dev_priv->quirks |= QUIRK_PIPEA_FORCE;
6298
    DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
6299
}
6300
 
6301
/*
6302
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
6303
 */
6304
static void quirk_ssc_force_disable(struct drm_device *dev)
6305
{
6306
    struct drm_i915_private *dev_priv = dev->dev_private;
6307
    dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
6308
}
6309
 
6310
/* One PCI-ID-keyed workaround entry: when the device ID matches and the
 * subsystem IDs match (PCI_ANY_ID acts as a wildcard), hook() is run. */
struct intel_quirk {
    int device;               /* PCI device ID */
    int subsystem_vendor;     /* subsystem vendor ID, or PCI_ANY_ID */
    int subsystem_device;     /* subsystem device ID, or PCI_ANY_ID */
    void (*hook)(struct drm_device *dev);  /* quirk to apply on match */
};
6316
 
6317
struct intel_quirk intel_quirks[] = {
6318
    /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
6319
    { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
6320
    /* HP Mini needs pipe A force quirk (LP: #322104) */
6321
    { 0x27ae,0x103c, 0x361a, quirk_pipea_force },
6322
 
6323
    /* Thinkpad R31 needs pipe A force quirk */
6324
    { 0x3577, 0x1014, 0x0505, quirk_pipea_force },
6325
    /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
6326
    { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
6327
 
6328
    /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
6329
    { 0x3577,  0x1014, 0x0513, quirk_pipea_force },
6330
    /* ThinkPad X40 needs pipe A force quirk */
6331
 
6332
    /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
6333
    { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
6334
 
6335
    /* 855 & before need to leave pipe A & dpll A up */
6336
    { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
6337
    { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
6338
 
6339
    /* Lenovo U160 cannot use SSC on LVDS */
6340
    { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
6341
 
6342
    /* Sony Vaio Y cannot use SSC on LVDS */
6343
    { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
6344
};
6345
 
6346
static void intel_init_quirks(struct drm_device *dev)
6347
{
6348
    struct pci_dev *d = dev->pdev;
6349
    int i;
6350
 
6351
    for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
6352
        struct intel_quirk *q = &intel_quirks[i];
6353
 
6354
        if (d->device == q->device &&
6355
            (d->subsystem_vendor == q->subsystem_vendor ||
6356
             q->subsystem_vendor == PCI_ANY_ID) &&
6357
            (d->subsystem_device == q->subsystem_device ||
6358
             q->subsystem_device == PCI_ANY_ID))
6359
            q->hook(dev);
6360
    }
6361
}
6362
 
6363
 
6364
void intel_modeset_init(struct drm_device *dev)
6365
{
6366
    struct drm_i915_private *dev_priv = dev->dev_private;
6367
    int i;
6368
 
6369
    drm_mode_config_init(dev);
6370
 
6371
    dev->mode_config.min_width = 0;
6372
    dev->mode_config.min_height = 0;
6373
 
6374
    dev->mode_config.funcs = (void *)&intel_mode_funcs;
6375
 
6376
    intel_init_quirks(dev);
6377
 
6378
    intel_init_display(dev);
6379
 
6380
    if (IS_GEN2(dev)) {
6381
        dev->mode_config.max_width = 2048;
6382
        dev->mode_config.max_height = 2048;
6383
    } else if (IS_GEN3(dev)) {
6384
        dev->mode_config.max_width = 4096;
6385
        dev->mode_config.max_height = 4096;
6386
    } else {
6387
        dev->mode_config.max_width = 8192;
6388
        dev->mode_config.max_height = 8192;
6389
    }
6390
 
6391
    dev->mode_config.fb_base = get_bus_addr();
6392
 
6393
    DRM_DEBUG_KMS("%d display pipe%s available.\n",
6394
              dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
6395
 
6396
#if 0
6397
 
6398
    for (i = 0; i < dev_priv->num_pipe; i++) {
6399
        intel_crtc_init(dev, i);
6400
    }
6401
 
6402
    /* Just disable it once at startup */
6403
    i915_disable_vga(dev);
6404
    intel_setup_outputs(dev);
6405
 
6406
    intel_init_clock_gating(dev);
6407
 
6408
    if (IS_IRONLAKE_M(dev)) {
6409
        ironlake_enable_drps(dev);
6410
        intel_init_emon(dev);
6411
    }
6412
 
6413
    if (IS_GEN6(dev) || IS_GEN7(dev)) {
6414
        gen6_enable_rps(dev_priv);
6415
        gen6_update_ring_freq(dev_priv);
6416
    }
6417
 
6418
    INIT_WORK(&dev_priv->idle_work, intel_idle_update);
6419
    setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
6420
            (unsigned long)dev);
6421
#endif
6422
 
6423
}
6424
 
6425