Subversion Repositories Kolibri OS

Rev

Rev 2335 | Rev 2339 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2327 Serge 1
/*
2
 * Copyright © 2006-2007 Intel Corporation
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21
 * DEALINGS IN THE SOFTWARE.
22
 *
23
 * Authors:
24
 *  Eric Anholt 
25
 */
26
 
27
//#include 
28
#include 
29
//#include 
30
#include 
31
#include 
2330 Serge 32
#include 
2327 Serge 33
//#include 
34
#include "drmP.h"
35
#include "intel_drv.h"
2330 Serge 36
#include "i915_drm.h"
2327 Serge 37
#include "i915_drv.h"
38
//#include "i915_trace.h"
39
#include "drm_dp_helper.h"
40
 
41
#include "drm_crtc_helper.h"
42
 
43
phys_addr_t get_bus_addr(void);
44
 
45
/* True iff n has exactly one set bit, i.e. is a power of two (0 is not). */
static inline __attribute__((const))
bool is_power_of_2(unsigned long n)
{
    if (n == 0)
        return false;
    return (n & (n - 1)) == 0;
}
50
 
2330 Serge 51
/* Largest errno value that may be encoded in a pointer. */
#define MAX_ERRNO       4095

/* Non-zero when x falls in the top MAX_ERRNO values of the address space. */
#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)

/* Encode a negative errno as an opaque pointer value. */
static inline void *ERR_PTR(long error)
{
    return (void *) error;
}

/* Test whether ptr is actually an ERR_PTR-encoded errno. */
static inline long IS_ERR(const void *ptr)
{
    return IS_ERR_VALUE((unsigned long)ptr);
}
64
 
65
 
2327 Serge 66
static inline int pci_read_config_word(struct pci_dev *dev, int where,
67
                    u16 *val)
68
{
69
    *val = PciRead16(dev->busnr, dev->devfn, where);
70
    return 1;
71
}
72
 
73
 
74
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
75
 
76
bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
77
static void intel_update_watermarks(struct drm_device *dev);
78
static void intel_increase_pllclock(struct drm_crtc *crtc);
79
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
80
 
81
/* PLL divider state: the "given" fields are iterated by the search
 * routines, the "derived" fields are filled in by intel_clock(). */
typedef struct {
    /* given values */
    int n;
    int m1, m2;
    int p1, p2;
    /* derived values */
    int dot;   /* resulting dot clock */
    int vco;   /* resulting VCO frequency */
    int m;     /* effective m divider */
    int p;     /* effective p divider */
} intel_clock_t;

/* Inclusive [min, max] bound for one divider. */
typedef struct {
    int min, max;
} intel_range_t;

/* p2 selection: targets below dot_limit use p2_slow, else p2_fast. */
typedef struct {
    int dot_limit;
    int p2_slow, p2_fast;
} intel_p2_t;

#define INTEL_P2_NUM              2

typedef struct intel_limit intel_limit_t;

/* Per-platform PLL limits plus the search routine that honours them. */
struct intel_limit {
    intel_range_t   dot, vco, n, m, m1, m2, p, p1;
    intel_p2_t      p2;
    bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
              int, int, intel_clock_t *);
};
110
 
111
/* FDI */
112
#define IRONLAKE_FDI_FREQ       2700000 /* in kHz for mode->clock */
113
 
114
static bool
115
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
116
            int target, int refclk, intel_clock_t *best_clock);
117
static bool
118
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
119
            int target, int refclk, intel_clock_t *best_clock);
120
 
121
static bool
122
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
123
              int target, int refclk, intel_clock_t *best_clock);
124
static bool
125
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
126
               int target, int refclk, intel_clock_t *best_clock);
127
 
128
static inline u32 /* units of 100MHz */
129
intel_fdi_link_freq(struct drm_device *dev)
130
{
131
	if (IS_GEN5(dev)) {
132
		struct drm_i915_private *dev_priv = dev->dev_private;
133
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
134
	} else
135
		return 27;
136
}
137
 
138
static const intel_limit_t intel_limits_i8xx_dvo = {
139
        .dot = { .min = 25000, .max = 350000 },
140
        .vco = { .min = 930000, .max = 1400000 },
141
        .n = { .min = 3, .max = 16 },
142
        .m = { .min = 96, .max = 140 },
143
        .m1 = { .min = 18, .max = 26 },
144
        .m2 = { .min = 6, .max = 16 },
145
        .p = { .min = 4, .max = 128 },
146
        .p1 = { .min = 2, .max = 33 },
147
	.p2 = { .dot_limit = 165000,
148
		.p2_slow = 4, .p2_fast = 2 },
149
	.find_pll = intel_find_best_PLL,
150
};
151
 
152
static const intel_limit_t intel_limits_i8xx_lvds = {
153
        .dot = { .min = 25000, .max = 350000 },
154
        .vco = { .min = 930000, .max = 1400000 },
155
        .n = { .min = 3, .max = 16 },
156
        .m = { .min = 96, .max = 140 },
157
        .m1 = { .min = 18, .max = 26 },
158
        .m2 = { .min = 6, .max = 16 },
159
        .p = { .min = 4, .max = 128 },
160
        .p1 = { .min = 1, .max = 6 },
161
	.p2 = { .dot_limit = 165000,
162
		.p2_slow = 14, .p2_fast = 7 },
163
	.find_pll = intel_find_best_PLL,
164
};
165
 
166
static const intel_limit_t intel_limits_i9xx_sdvo = {
167
        .dot = { .min = 20000, .max = 400000 },
168
        .vco = { .min = 1400000, .max = 2800000 },
169
        .n = { .min = 1, .max = 6 },
170
        .m = { .min = 70, .max = 120 },
171
        .m1 = { .min = 10, .max = 22 },
172
        .m2 = { .min = 5, .max = 9 },
173
        .p = { .min = 5, .max = 80 },
174
        .p1 = { .min = 1, .max = 8 },
175
	.p2 = { .dot_limit = 200000,
176
		.p2_slow = 10, .p2_fast = 5 },
177
	.find_pll = intel_find_best_PLL,
178
};
179
 
180
static const intel_limit_t intel_limits_i9xx_lvds = {
181
        .dot = { .min = 20000, .max = 400000 },
182
        .vco = { .min = 1400000, .max = 2800000 },
183
        .n = { .min = 1, .max = 6 },
184
        .m = { .min = 70, .max = 120 },
185
        .m1 = { .min = 10, .max = 22 },
186
        .m2 = { .min = 5, .max = 9 },
187
        .p = { .min = 7, .max = 98 },
188
        .p1 = { .min = 1, .max = 8 },
189
	.p2 = { .dot_limit = 112000,
190
		.p2_slow = 14, .p2_fast = 7 },
191
	.find_pll = intel_find_best_PLL,
192
};
193
 
194
 
195
static const intel_limit_t intel_limits_g4x_sdvo = {
196
	.dot = { .min = 25000, .max = 270000 },
197
	.vco = { .min = 1750000, .max = 3500000},
198
	.n = { .min = 1, .max = 4 },
199
	.m = { .min = 104, .max = 138 },
200
	.m1 = { .min = 17, .max = 23 },
201
	.m2 = { .min = 5, .max = 11 },
202
	.p = { .min = 10, .max = 30 },
203
	.p1 = { .min = 1, .max = 3},
204
	.p2 = { .dot_limit = 270000,
205
		.p2_slow = 10,
206
		.p2_fast = 10
207
	},
208
	.find_pll = intel_g4x_find_best_PLL,
209
};
210
 
211
static const intel_limit_t intel_limits_g4x_hdmi = {
212
	.dot = { .min = 22000, .max = 400000 },
213
	.vco = { .min = 1750000, .max = 3500000},
214
	.n = { .min = 1, .max = 4 },
215
	.m = { .min = 104, .max = 138 },
216
	.m1 = { .min = 16, .max = 23 },
217
	.m2 = { .min = 5, .max = 11 },
218
	.p = { .min = 5, .max = 80 },
219
	.p1 = { .min = 1, .max = 8},
220
	.p2 = { .dot_limit = 165000,
221
		.p2_slow = 10, .p2_fast = 5 },
222
	.find_pll = intel_g4x_find_best_PLL,
223
};
224
 
225
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
226
	.dot = { .min = 20000, .max = 115000 },
227
	.vco = { .min = 1750000, .max = 3500000 },
228
	.n = { .min = 1, .max = 3 },
229
	.m = { .min = 104, .max = 138 },
230
	.m1 = { .min = 17, .max = 23 },
231
	.m2 = { .min = 5, .max = 11 },
232
	.p = { .min = 28, .max = 112 },
233
	.p1 = { .min = 2, .max = 8 },
234
	.p2 = { .dot_limit = 0,
235
		.p2_slow = 14, .p2_fast = 14
236
	},
237
	.find_pll = intel_g4x_find_best_PLL,
238
};
239
 
240
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
241
	.dot = { .min = 80000, .max = 224000 },
242
	.vco = { .min = 1750000, .max = 3500000 },
243
	.n = { .min = 1, .max = 3 },
244
	.m = { .min = 104, .max = 138 },
245
	.m1 = { .min = 17, .max = 23 },
246
	.m2 = { .min = 5, .max = 11 },
247
	.p = { .min = 14, .max = 42 },
248
	.p1 = { .min = 2, .max = 6 },
249
	.p2 = { .dot_limit = 0,
250
		.p2_slow = 7, .p2_fast = 7
251
	},
252
	.find_pll = intel_g4x_find_best_PLL,
253
};
254
 
255
static const intel_limit_t intel_limits_g4x_display_port = {
256
        .dot = { .min = 161670, .max = 227000 },
257
        .vco = { .min = 1750000, .max = 3500000},
258
        .n = { .min = 1, .max = 2 },
259
        .m = { .min = 97, .max = 108 },
260
        .m1 = { .min = 0x10, .max = 0x12 },
261
        .m2 = { .min = 0x05, .max = 0x06 },
262
        .p = { .min = 10, .max = 20 },
263
        .p1 = { .min = 1, .max = 2},
264
        .p2 = { .dot_limit = 0,
265
		.p2_slow = 10, .p2_fast = 10 },
266
        .find_pll = intel_find_pll_g4x_dp,
267
};
268
 
269
static const intel_limit_t intel_limits_pineview_sdvo = {
270
        .dot = { .min = 20000, .max = 400000},
271
        .vco = { .min = 1700000, .max = 3500000 },
272
	/* Pineview's Ncounter is a ring counter */
273
        .n = { .min = 3, .max = 6 },
274
        .m = { .min = 2, .max = 256 },
275
	/* Pineview only has one combined m divider, which we treat as m2. */
276
        .m1 = { .min = 0, .max = 0 },
277
        .m2 = { .min = 0, .max = 254 },
278
        .p = { .min = 5, .max = 80 },
279
        .p1 = { .min = 1, .max = 8 },
280
	.p2 = { .dot_limit = 200000,
281
		.p2_slow = 10, .p2_fast = 5 },
282
	.find_pll = intel_find_best_PLL,
283
};
284
 
285
static const intel_limit_t intel_limits_pineview_lvds = {
286
        .dot = { .min = 20000, .max = 400000 },
287
        .vco = { .min = 1700000, .max = 3500000 },
288
        .n = { .min = 3, .max = 6 },
289
        .m = { .min = 2, .max = 256 },
290
        .m1 = { .min = 0, .max = 0 },
291
        .m2 = { .min = 0, .max = 254 },
292
        .p = { .min = 7, .max = 112 },
293
        .p1 = { .min = 1, .max = 8 },
294
	.p2 = { .dot_limit = 112000,
295
		.p2_slow = 14, .p2_fast = 14 },
296
	.find_pll = intel_find_best_PLL,
297
};
298
 
299
/* Ironlake / Sandybridge
300
 *
301
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
302
 * the range value for them is (actual_value - 2).
303
 */
304
static const intel_limit_t intel_limits_ironlake_dac = {
305
	.dot = { .min = 25000, .max = 350000 },
306
	.vco = { .min = 1760000, .max = 3510000 },
307
	.n = { .min = 1, .max = 5 },
308
	.m = { .min = 79, .max = 127 },
309
	.m1 = { .min = 12, .max = 22 },
310
	.m2 = { .min = 5, .max = 9 },
311
	.p = { .min = 5, .max = 80 },
312
	.p1 = { .min = 1, .max = 8 },
313
	.p2 = { .dot_limit = 225000,
314
		.p2_slow = 10, .p2_fast = 5 },
315
	.find_pll = intel_g4x_find_best_PLL,
316
};
317
 
318
static const intel_limit_t intel_limits_ironlake_single_lvds = {
319
	.dot = { .min = 25000, .max = 350000 },
320
	.vco = { .min = 1760000, .max = 3510000 },
321
	.n = { .min = 1, .max = 3 },
322
	.m = { .min = 79, .max = 118 },
323
	.m1 = { .min = 12, .max = 22 },
324
	.m2 = { .min = 5, .max = 9 },
325
	.p = { .min = 28, .max = 112 },
326
	.p1 = { .min = 2, .max = 8 },
327
	.p2 = { .dot_limit = 225000,
328
		.p2_slow = 14, .p2_fast = 14 },
329
	.find_pll = intel_g4x_find_best_PLL,
330
};
331
 
332
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
333
	.dot = { .min = 25000, .max = 350000 },
334
	.vco = { .min = 1760000, .max = 3510000 },
335
	.n = { .min = 1, .max = 3 },
336
	.m = { .min = 79, .max = 127 },
337
	.m1 = { .min = 12, .max = 22 },
338
	.m2 = { .min = 5, .max = 9 },
339
	.p = { .min = 14, .max = 56 },
340
	.p1 = { .min = 2, .max = 8 },
341
	.p2 = { .dot_limit = 225000,
342
		.p2_slow = 7, .p2_fast = 7 },
343
	.find_pll = intel_g4x_find_best_PLL,
344
};
345
 
346
/* LVDS 100mhz refclk limits. */
347
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
348
	.dot = { .min = 25000, .max = 350000 },
349
	.vco = { .min = 1760000, .max = 3510000 },
350
	.n = { .min = 1, .max = 2 },
351
	.m = { .min = 79, .max = 126 },
352
	.m1 = { .min = 12, .max = 22 },
353
	.m2 = { .min = 5, .max = 9 },
354
	.p = { .min = 28, .max = 112 },
355
	.p1 = { .min = 2,.max = 8 },
356
	.p2 = { .dot_limit = 225000,
357
		.p2_slow = 14, .p2_fast = 14 },
358
	.find_pll = intel_g4x_find_best_PLL,
359
};
360
 
361
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
362
	.dot = { .min = 25000, .max = 350000 },
363
	.vco = { .min = 1760000, .max = 3510000 },
364
	.n = { .min = 1, .max = 3 },
365
	.m = { .min = 79, .max = 126 },
366
	.m1 = { .min = 12, .max = 22 },
367
	.m2 = { .min = 5, .max = 9 },
368
	.p = { .min = 14, .max = 42 },
369
	.p1 = { .min = 2,.max = 6 },
370
	.p2 = { .dot_limit = 225000,
371
		.p2_slow = 7, .p2_fast = 7 },
372
	.find_pll = intel_g4x_find_best_PLL,
373
};
374
 
375
static const intel_limit_t intel_limits_ironlake_display_port = {
376
        .dot = { .min = 25000, .max = 350000 },
377
        .vco = { .min = 1760000, .max = 3510000},
378
        .n = { .min = 1, .max = 2 },
379
        .m = { .min = 81, .max = 90 },
380
        .m1 = { .min = 12, .max = 22 },
381
        .m2 = { .min = 5, .max = 9 },
382
        .p = { .min = 10, .max = 20 },
383
        .p1 = { .min = 1, .max = 2},
384
        .p2 = { .dot_limit = 0,
385
		.p2_slow = 10, .p2_fast = 10 },
386
        .find_pll = intel_find_pll_ironlake_dp,
387
};
388
 
389
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
390
						int refclk)
391
{
392
	struct drm_device *dev = crtc->dev;
393
	struct drm_i915_private *dev_priv = dev->dev_private;
394
	const intel_limit_t *limit;
395
 
396
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
397
		if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
398
		    LVDS_CLKB_POWER_UP) {
399
			/* LVDS dual channel */
400
			if (refclk == 100000)
401
				limit = &intel_limits_ironlake_dual_lvds_100m;
402
			else
403
				limit = &intel_limits_ironlake_dual_lvds;
404
		} else {
405
			if (refclk == 100000)
406
				limit = &intel_limits_ironlake_single_lvds_100m;
407
			else
408
				limit = &intel_limits_ironlake_single_lvds;
409
		}
410
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
411
			HAS_eDP)
412
		limit = &intel_limits_ironlake_display_port;
413
	else
414
		limit = &intel_limits_ironlake_dac;
415
 
416
	return limit;
417
}
418
 
419
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
420
{
421
	struct drm_device *dev = crtc->dev;
422
	struct drm_i915_private *dev_priv = dev->dev_private;
423
	const intel_limit_t *limit;
424
 
425
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
426
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
427
		    LVDS_CLKB_POWER_UP)
428
			/* LVDS with dual channel */
429
			limit = &intel_limits_g4x_dual_channel_lvds;
430
		else
431
			/* LVDS with dual channel */
432
			limit = &intel_limits_g4x_single_channel_lvds;
433
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
434
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
435
		limit = &intel_limits_g4x_hdmi;
436
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
437
		limit = &intel_limits_g4x_sdvo;
438
	} else if (intel_pipe_has_type (crtc, INTEL_OUTPUT_DISPLAYPORT)) {
439
		limit = &intel_limits_g4x_display_port;
440
	} else /* The option is for other outputs */
441
		limit = &intel_limits_i9xx_sdvo;
442
 
443
	return limit;
444
}
445
 
446
static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
447
{
448
	struct drm_device *dev = crtc->dev;
449
	const intel_limit_t *limit;
450
 
451
	if (HAS_PCH_SPLIT(dev))
452
		limit = intel_ironlake_limit(crtc, refclk);
453
	else if (IS_G4X(dev)) {
454
		limit = intel_g4x_limit(crtc);
455
	} else if (IS_PINEVIEW(dev)) {
456
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
457
			limit = &intel_limits_pineview_lvds;
458
		else
459
			limit = &intel_limits_pineview_sdvo;
460
	} else if (!IS_GEN2(dev)) {
461
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
462
			limit = &intel_limits_i9xx_lvds;
463
		else
464
			limit = &intel_limits_i9xx_sdvo;
465
	} else {
466
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
467
			limit = &intel_limits_i8xx_lvds;
468
		else
469
			limit = &intel_limits_i8xx_dvo;
470
	}
471
	return limit;
472
}
473
 
474
/* m1 is reserved as 0 in Pineview, n is a ring counter */
475
static void pineview_clock(int refclk, intel_clock_t *clock)
476
{
477
	clock->m = clock->m2 + 2;
478
	clock->p = clock->p1 * clock->p2;
479
	clock->vco = refclk * clock->m / clock->n;
480
	clock->dot = clock->vco / clock->p;
481
}
482
 
483
/* Derive m, p, vco and dot from the given dividers.  Register values
 * for n/m1/m2 are biased by 2 on non-Pineview hardware. */
static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
{
	if (IS_PINEVIEW(dev)) {
		pineview_clock(refclk, clock);
	} else {
		clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
		clock->p = clock->p1 * clock->p2;
		clock->vco = refclk * clock->m / (clock->n + 2);
		clock->dot = clock->vco / clock->p;
	}
}
494
 
495
/**
496
 * Returns whether any output on the specified pipe is of the specified type
497
 */
498
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
499
{
500
	struct drm_device *dev = crtc->dev;
501
	struct drm_mode_config *mode_config = &dev->mode_config;
502
	struct intel_encoder *encoder;
503
 
504
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
505
		if (encoder->base.crtc == crtc && encoder->type == type)
506
			return true;
507
 
508
	return false;
509
}
510
 
511
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
512
/**
513
 * Returns whether the given set of divisors are valid for a given refclk with
514
 * the given connectors.
515
 */
516
 
517
static bool intel_PLL_is_valid(struct drm_device *dev,
518
			       const intel_limit_t *limit,
519
			       const intel_clock_t *clock)
520
{
521
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
522
		INTELPllInvalid ("p1 out of range\n");
523
	if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
524
		INTELPllInvalid ("p out of range\n");
525
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
526
		INTELPllInvalid ("m2 out of range\n");
527
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
528
		INTELPllInvalid ("m1 out of range\n");
529
	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
530
		INTELPllInvalid ("m1 <= m2\n");
531
	if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
532
		INTELPllInvalid ("m out of range\n");
533
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
534
		INTELPllInvalid ("n out of range\n");
535
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
536
		INTELPllInvalid ("vco out of range\n");
537
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
538
	 * connector, etc., rather than just a single range.
539
	 */
540
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
541
		INTELPllInvalid ("dot out of range\n");
542
 
543
	return true;
544
}
545
 
546
static bool
547
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
548
		    int target, int refclk, intel_clock_t *best_clock)
549
 
550
{
551
	struct drm_device *dev = crtc->dev;
552
	struct drm_i915_private *dev_priv = dev->dev_private;
553
	intel_clock_t clock;
554
	int err = target;
555
 
556
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
557
	    (I915_READ(LVDS)) != 0) {
558
		/*
559
		 * For LVDS, if the panel is on, just rely on its current
560
		 * settings for dual-channel.  We haven't figured out how to
561
		 * reliably set up different single/dual channel state, if we
562
		 * even can.
563
		 */
564
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
565
		    LVDS_CLKB_POWER_UP)
566
			clock.p2 = limit->p2.p2_fast;
567
		else
568
			clock.p2 = limit->p2.p2_slow;
569
	} else {
570
		if (target < limit->p2.dot_limit)
571
			clock.p2 = limit->p2.p2_slow;
572
		else
573
			clock.p2 = limit->p2.p2_fast;
574
	}
575
 
576
	memset (best_clock, 0, sizeof (*best_clock));
577
 
578
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
579
	     clock.m1++) {
580
		for (clock.m2 = limit->m2.min;
581
		     clock.m2 <= limit->m2.max; clock.m2++) {
582
			/* m1 is always 0 in Pineview */
583
			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
584
				break;
585
			for (clock.n = limit->n.min;
586
			     clock.n <= limit->n.max; clock.n++) {
587
				for (clock.p1 = limit->p1.min;
588
					clock.p1 <= limit->p1.max; clock.p1++) {
589
					int this_err;
590
 
591
					intel_clock(dev, refclk, &clock);
592
					if (!intel_PLL_is_valid(dev, limit,
593
								&clock))
594
						continue;
595
 
596
					this_err = abs(clock.dot - target);
597
					if (this_err < err) {
598
						*best_clock = clock;
599
						err = this_err;
600
					}
601
				}
602
			}
603
		}
604
	}
605
 
606
	return (err != target);
607
}
608
 
609
static bool
610
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
611
			int target, int refclk, intel_clock_t *best_clock)
612
{
613
	struct drm_device *dev = crtc->dev;
614
	struct drm_i915_private *dev_priv = dev->dev_private;
615
	intel_clock_t clock;
616
	int max_n;
617
	bool found;
618
	/* approximately equals target * 0.00585 */
619
	int err_most = (target >> 8) + (target >> 9);
620
	found = false;
621
 
622
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
623
		int lvds_reg;
624
 
625
		if (HAS_PCH_SPLIT(dev))
626
			lvds_reg = PCH_LVDS;
627
		else
628
			lvds_reg = LVDS;
629
		if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
630
		    LVDS_CLKB_POWER_UP)
631
			clock.p2 = limit->p2.p2_fast;
632
		else
633
			clock.p2 = limit->p2.p2_slow;
634
	} else {
635
		if (target < limit->p2.dot_limit)
636
			clock.p2 = limit->p2.p2_slow;
637
		else
638
			clock.p2 = limit->p2.p2_fast;
639
	}
640
 
641
	memset(best_clock, 0, sizeof(*best_clock));
642
	max_n = limit->n.max;
643
	/* based on hardware requirement, prefer smaller n to precision */
644
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
645
		/* based on hardware requirement, prefere larger m1,m2 */
646
		for (clock.m1 = limit->m1.max;
647
		     clock.m1 >= limit->m1.min; clock.m1--) {
648
			for (clock.m2 = limit->m2.max;
649
			     clock.m2 >= limit->m2.min; clock.m2--) {
650
				for (clock.p1 = limit->p1.max;
651
				     clock.p1 >= limit->p1.min; clock.p1--) {
652
					int this_err;
653
 
654
					intel_clock(dev, refclk, &clock);
655
					if (!intel_PLL_is_valid(dev, limit,
656
								&clock))
657
						continue;
658
 
659
					this_err = abs(clock.dot - target);
660
					if (this_err < err_most) {
661
						*best_clock = clock;
662
						err_most = this_err;
663
						max_n = clock.n;
664
						found = true;
665
					}
666
				}
667
			}
668
		}
669
	}
670
	return found;
671
}
672
 
673
static bool
674
intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
675
			   int target, int refclk, intel_clock_t *best_clock)
676
{
677
	struct drm_device *dev = crtc->dev;
678
	intel_clock_t clock;
679
 
680
	if (target < 200000) {
681
		clock.n = 1;
682
		clock.p1 = 2;
683
		clock.p2 = 10;
684
		clock.m1 = 12;
685
		clock.m2 = 9;
686
	} else {
687
		clock.n = 2;
688
		clock.p1 = 1;
689
		clock.p2 = 10;
690
		clock.m1 = 14;
691
		clock.m2 = 8;
692
	}
693
	intel_clock(dev, refclk, &clock);
694
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
695
	return true;
696
}
697
 
698
/* DisplayPort has only two frequencies, 162MHz and 270MHz */
699
static bool
700
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
701
		      int target, int refclk, intel_clock_t *best_clock)
702
{
703
	intel_clock_t clock;
704
	if (target < 200000) {
705
		clock.p1 = 2;
706
		clock.p2 = 10;
707
		clock.n = 2;
708
		clock.m1 = 23;
709
		clock.m2 = 8;
710
	} else {
711
		clock.p1 = 1;
712
		clock.p2 = 10;
713
		clock.n = 1;
714
		clock.m1 = 14;
715
		clock.m2 = 2;
716
	}
717
	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
718
	clock.p = (clock.p1 * clock.p2);
719
	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
720
	clock.vco = 0;
721
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
722
	return true;
723
}
724
 
725
/**
726
 * intel_wait_for_vblank - wait for vblank on a given pipe
727
 * @dev: drm device
728
 * @pipe: pipe to wait for
729
 *
730
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
731
 * mode setting code.
732
 */
733
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
734
{
735
	struct drm_i915_private *dev_priv = dev->dev_private;
736
	int pipestat_reg = PIPESTAT(pipe);
737
 
738
	/* Clear existing vblank status. Note this will clear any other
739
	 * sticky status fields as well.
740
	 *
741
	 * This races with i915_driver_irq_handler() with the result
742
	 * that either function could miss a vblank event.  Here it is not
743
	 * fatal, as we will either wait upon the next vblank interrupt or
744
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
745
	 * called during modeset at which time the GPU should be idle and
746
	 * should *not* be performing page flips and thus not waiting on
747
	 * vblanks...
748
	 * Currently, the result of us stealing a vblank from the irq
749
	 * handler is that a single frame will be skipped during swapbuffers.
750
	 */
751
	I915_WRITE(pipestat_reg,
752
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
753
 
754
	/* Wait for vblank interrupt bit to set */
755
	if (wait_for(I915_READ(pipestat_reg) &
756
		     PIPE_VBLANK_INTERRUPT_STATUS,
757
		     50))
758
		DRM_DEBUG_KMS("vblank wait timed out\n");
759
}
760
 
761
/*
762
 * intel_wait_for_pipe_off - wait for pipe to turn off
763
 * @dev: drm device
764
 * @pipe: pipe to wait for
765
 *
766
 * After disabling a pipe, we can't wait for vblank in the usual way,
767
 * spinning on the vblank interrupt status bit, since we won't actually
768
 * see an interrupt when the pipe is disabled.
769
 *
770
 * On Gen4 and above:
771
 *   wait for the pipe register state bit to turn off
772
 *
773
 * Otherwise:
774
 *   wait for the display line value to settle (it usually
775
 *   ends up stopping at the start of the next frame).
776
 *
777
 */
778
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
779
{
780
	struct drm_i915_private *dev_priv = dev->dev_private;
781
 
782
	if (INTEL_INFO(dev)->gen >= 4) {
783
		int reg = PIPECONF(pipe);
784
 
785
		/* Wait for the Pipe State to go off */
786
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
787
			     100))
788
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
789
	} else {
790
		u32 last_line;
791
		int reg = PIPEDSL(pipe);
792
		unsigned long timeout = jiffies + msecs_to_jiffies(100);
793
 
794
		/* Wait for the display line to settle */
795
		do {
796
			last_line = I915_READ(reg) & DSL_LINEMASK;
797
			mdelay(5);
798
		} while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
799
			 time_after(timeout, jiffies));
800
		if (time_after(jiffies, timeout))
801
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
802
	}
803
}
804
 
805
/* Human-readable form of an enable bit, for assertion messages. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";
	return "off";
}
809
 
810
/* Only for pre-ILK configs */
811
static void assert_pll(struct drm_i915_private *dev_priv,
812
		       enum pipe pipe, bool state)
813
{
814
	int reg;
815
	u32 val;
816
	bool cur_state;
817
 
818
	reg = DPLL(pipe);
819
	val = I915_READ(reg);
820
	cur_state = !!(val & DPLL_VCO_ENABLE);
821
	WARN(cur_state != state,
822
	     "PLL state assertion failure (expected %s, current %s)\n",
823
	     state_string(state), state_string(cur_state));
824
}
825
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
826
#define assert_pll_disabled(d, p) assert_pll(d, p, false)
827
 
828
/* For ILK+ */
829
static void assert_pch_pll(struct drm_i915_private *dev_priv,
830
			   enum pipe pipe, bool state)
831
{
832
	int reg;
833
	u32 val;
834
	bool cur_state;
835
 
836
	reg = PCH_DPLL(pipe);
837
	val = I915_READ(reg);
838
	cur_state = !!(val & DPLL_VCO_ENABLE);
839
	WARN(cur_state != state,
840
	     "PCH PLL state assertion failure (expected %s, current %s)\n",
841
	     state_string(state), state_string(cur_state));
842
}
843
#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
844
#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
845
 
846
/* Verify the FDI transmitter enable bit matches the expected state. */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val = I915_READ(FDI_TX_CTL(pipe));
	bool cur_state = (val & FDI_TX_ENABLE) != 0;

	WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

/* Verify the FDI receiver enable bit matches the expected state. */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val = I915_READ(FDI_RX_CTL(pipe));
	bool cur_state = (val & FDI_RX_ENABLE) != 0;

	WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
879
 
880
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
881
				      enum pipe pipe)
882
{
883
	int reg;
884
	u32 val;
885
 
886
	/* ILK FDI PLL is always enabled */
887
	if (dev_priv->info->gen == 5)
888
		return;
889
 
890
	reg = FDI_TX_CTL(pipe);
891
	val = I915_READ(reg);
892
	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
893
}
894
 
895
static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
896
				      enum pipe pipe)
897
{
898
	int reg;
899
	u32 val;
900
 
901
	reg = FDI_RX_CTL(pipe);
902
	val = I915_READ(reg);
903
	WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
904
}
905
 
906
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
907
				  enum pipe pipe)
908
{
909
	int pp_reg, lvds_reg;
910
	u32 val;
911
	enum pipe panel_pipe = PIPE_A;
912
	bool locked = true;
913
 
914
	if (HAS_PCH_SPLIT(dev_priv->dev)) {
915
		pp_reg = PCH_PP_CONTROL;
916
		lvds_reg = PCH_LVDS;
917
	} else {
918
		pp_reg = PP_CONTROL;
919
		lvds_reg = LVDS;
920
	}
921
 
922
	val = I915_READ(pp_reg);
923
	if (!(val & PANEL_POWER_ON) ||
924
	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
925
		locked = false;
926
 
927
	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
928
		panel_pipe = PIPE_B;
929
 
930
	WARN(panel_pipe == pipe && locked,
931
	     "panel assertion failure, pipe %c regs locked\n",
932
	     pipe_name(pipe));
933
}
934
 
935
/* Assert that @pipe's enable bit matches the expected @state. */
static void assert_pipe(struct drm_i915_private *dev_priv,
			enum pipe pipe, bool state)
{
	bool cur_state = !!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE);

	WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
951
 
952
static void assert_plane_enabled(struct drm_i915_private *dev_priv,
953
				 enum plane plane)
954
{
955
	int reg;
956
	u32 val;
957
 
958
	reg = DSPCNTR(plane);
959
	val = I915_READ(reg);
960
	WARN(!(val & DISPLAY_PLANE_ENABLE),
961
	     "plane %c assertion failure, should be active but is disabled\n",
962
	     plane_name(plane));
963
}
964
 
965
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
966
				   enum pipe pipe)
967
{
968
	int reg, i;
969
	u32 val;
970
	int cur_pipe;
971
 
972
	/* Planes are fixed to pipes on ILK+ */
973
	if (HAS_PCH_SPLIT(dev_priv->dev))
974
		return;
975
 
976
	/* Need to check both planes against the pipe */
977
	for (i = 0; i < 2; i++) {
978
		reg = DSPCNTR(i);
979
		val = I915_READ(reg);
980
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
981
			DISPPLANE_SEL_PIPE_SHIFT;
982
		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
983
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
984
		     plane_name(i), pipe_name(pipe));
985
	}
986
}
987
 
988
static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
989
{
990
	u32 val;
991
	bool enabled;
992
 
993
	val = I915_READ(PCH_DREF_CONTROL);
994
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
995
			    DREF_SUPERSPREAD_SOURCE_MASK));
996
	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
997
}
998
 
999
static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
1000
				       enum pipe pipe)
1001
{
1002
	int reg;
1003
	u32 val;
1004
	bool enabled;
1005
 
1006
	reg = TRANSCONF(pipe);
1007
	val = I915_READ(reg);
1008
	enabled = !!(val & TRANS_ENABLE);
1009
	WARN(enabled,
1010
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
1011
	     pipe_name(pipe));
1012
}
1013
 
1014
/*
 * Return true when the DP port whose control value is @val is enabled
 * and currently routed to @pipe (@port_sel is the CPT port-select value).
 */
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum pipe pipe, u32 port_sel, u32 val)
{
	if (!(val & DP_PORT_EN))
		return false;

	if (!HAS_PCH_CPT(dev_priv->dev))
		return (val & DP_PIPE_MASK) == (pipe << 30);

	/* On CPT the routing lives in the transcoder's DP control register */
	return (I915_READ(TRANS_DP_CTL(pipe)) & TRANS_DP_PORT_SEL_MASK) == port_sel;
}
1031
 
1032
/* Return true when the HDMI port with control value @val drives @pipe. */
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if (!(val & PORT_ENABLE))
		return false;

	if (HAS_PCH_CPT(dev_priv->dev))
		return (val & PORT_TRANS_SEL_MASK) == PORT_TRANS_SEL_CPT(pipe);

	return (val & TRANSCODER_MASK) == TRANSCODER(pipe);
}
1047
 
1048
/* Return true when the LVDS port with control value @val drives @pipe. */
static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if (!(val & LVDS_PORT_EN))
		return false;

	if (HAS_PCH_CPT(dev_priv->dev))
		return (val & PORT_TRANS_SEL_MASK) == PORT_TRANS_SEL_CPT(pipe);

	return (val & LVDS_PIPE_MASK) == LVDS_PIPE(pipe);
}
1063
 
1064
/* Return true when the VGA/ADPA port with control value @val drives @pipe. */
static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if (!(val & ADPA_DAC_ENABLE))
		return false;

	if (HAS_PCH_CPT(dev_priv->dev))
		return (val & PORT_TRANS_SEL_MASK) == PORT_TRANS_SEL_CPT(pipe);

	return (val & ADPA_PIPE_SELECT_MASK) == ADPA_PIPE_SELECT(pipe);
}
1078
 
1079
/* Warn if the PCH DP port at @reg is still enabled on @pipe's transcoder. */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, int reg, u32 port_sel)
{
	u32 port_ctl = I915_READ(reg);

	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, port_ctl),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));
}
1087
 
1088
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1089
				     enum pipe pipe, int reg)
1090
{
1091
	u32 val = I915_READ(reg);
1092
	WARN(hdmi_pipe_enabled(dev_priv, val, pipe),
1093
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1094
	     reg, pipe_name(pipe));
1095
}
1096
 
1097
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1098
				      enum pipe pipe)
1099
{
1100
	int reg;
1101
	u32 val;
1102
 
1103
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1104
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1105
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1106
 
1107
	reg = PCH_ADPA;
1108
	val = I915_READ(reg);
1109
	WARN(adpa_pipe_enabled(dev_priv, val, pipe),
1110
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
1111
	     pipe_name(pipe));
1112
 
1113
	reg = PCH_LVDS;
1114
	val = I915_READ(reg);
1115
	WARN(lvds_pipe_enabled(dev_priv, val, pipe),
1116
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
1117
	     pipe_name(pipe));
1118
 
1119
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
1120
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
1121
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
1122
}
1123
 
1124
/**
1125
 * intel_enable_pll - enable a PLL
1126
 * @dev_priv: i915 private structure
1127
 * @pipe: pipe PLL to enable
1128
 *
1129
 * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
1130
 * make sure the PLL reg is writable first though, since the panel write
1131
 * protect mechanism may be enabled.
1132
 *
1133
 * Note!  This is for pre-ILK only.
1134
 */
1135
static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1136
{
1137
    int reg;
1138
    u32 val;
1139
 
1140
    /* No really, not for ILK+ */
1141
    BUG_ON(dev_priv->info->gen >= 5);
1142
 
1143
    /* PLL is protected by panel, make sure we can write it */
1144
    if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
1145
        assert_panel_unlocked(dev_priv, pipe);
1146
 
1147
    reg = DPLL(pipe);
1148
    val = I915_READ(reg);
1149
    val |= DPLL_VCO_ENABLE;
1150
 
1151
    /* We do this three times for luck */
1152
    I915_WRITE(reg, val);
1153
    POSTING_READ(reg);
1154
    udelay(150); /* wait for warmup */
1155
    I915_WRITE(reg, val);
1156
    POSTING_READ(reg);
1157
    udelay(150); /* wait for warmup */
1158
    I915_WRITE(reg, val);
1159
    POSTING_READ(reg);
1160
    udelay(150); /* wait for warmup */
1161
}
1162
 
1163
/**
1164
 * intel_disable_pll - disable a PLL
1165
 * @dev_priv: i915 private structure
1166
 * @pipe: pipe PLL to disable
1167
 *
1168
 * Disable the PLL for @pipe, making sure the pipe is off first.
1169
 *
1170
 * Note!  This is for pre-ILK only.
1171
 */
1172
static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1173
{
1174
	int reg;
1175
	u32 val;
1176
 
1177
	/* Don't disable pipe A or pipe A PLLs if needed */
1178
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1179
		return;
1180
 
1181
	/* Make sure the pipe isn't still relying on us */
1182
	assert_pipe_disabled(dev_priv, pipe);
1183
 
1184
	reg = DPLL(pipe);
1185
	val = I915_READ(reg);
1186
	val &= ~DPLL_VCO_ENABLE;
1187
	I915_WRITE(reg, val);
1188
	POSTING_READ(reg);
1189
}
1190
 
1191
/**
1192
 * intel_enable_pch_pll - enable PCH PLL
1193
 * @dev_priv: i915 private structure
1194
 * @pipe: pipe PLL to enable
1195
 *
1196
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
1197
 * drives the transcoder clock.
1198
 */
1199
static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
1200
				 enum pipe pipe)
1201
{
1202
	int reg;
1203
	u32 val;
1204
 
1205
	/* PCH only available on ILK+ */
1206
	BUG_ON(dev_priv->info->gen < 5);
1207
 
1208
	/* PCH refclock must be enabled first */
1209
	assert_pch_refclk_enabled(dev_priv);
1210
 
1211
	reg = PCH_DPLL(pipe);
1212
	val = I915_READ(reg);
1213
	val |= DPLL_VCO_ENABLE;
1214
	I915_WRITE(reg, val);
1215
	POSTING_READ(reg);
1216
	udelay(200);
1217
}
1218
 
1219
static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
1220
				  enum pipe pipe)
1221
{
1222
	int reg;
1223
	u32 val;
1224
 
1225
	/* PCH only available on ILK+ */
1226
	BUG_ON(dev_priv->info->gen < 5);
1227
 
1228
	/* Make sure transcoder isn't still depending on us */
1229
	assert_transcoder_disabled(dev_priv, pipe);
1230
 
1231
	reg = PCH_DPLL(pipe);
1232
	val = I915_READ(reg);
1233
	val &= ~DPLL_VCO_ENABLE;
1234
	I915_WRITE(reg, val);
1235
	POSTING_READ(reg);
1236
	udelay(200);
1237
}
1238
 
1239
static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1240
				    enum pipe pipe)
1241
{
1242
	int reg;
1243
	u32 val;
1244
 
1245
	/* PCH only available on ILK+ */
1246
	BUG_ON(dev_priv->info->gen < 5);
1247
 
1248
	/* Make sure PCH DPLL is enabled */
1249
	assert_pch_pll_enabled(dev_priv, pipe);
1250
 
1251
	/* FDI must be feeding us bits for PCH ports */
1252
	assert_fdi_tx_enabled(dev_priv, pipe);
1253
	assert_fdi_rx_enabled(dev_priv, pipe);
1254
 
1255
	reg = TRANSCONF(pipe);
1256
	val = I915_READ(reg);
1257
 
1258
	if (HAS_PCH_IBX(dev_priv->dev)) {
1259
		/*
1260
		 * make the BPC in transcoder be consistent with
1261
		 * that in pipeconf reg.
1262
		 */
1263
		val &= ~PIPE_BPC_MASK;
1264
		val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
1265
	}
1266
	I915_WRITE(reg, val | TRANS_ENABLE);
1267
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1268
		DRM_ERROR("failed to enable transcoder %d\n", pipe);
1269
}
1270
 
1271
static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
1272
				     enum pipe pipe)
1273
{
1274
	int reg;
1275
	u32 val;
1276
 
1277
	/* FDI relies on the transcoder */
1278
	assert_fdi_tx_disabled(dev_priv, pipe);
1279
	assert_fdi_rx_disabled(dev_priv, pipe);
1280
 
1281
	/* Ports must be off as well */
1282
	assert_pch_ports_disabled(dev_priv, pipe);
1283
 
1284
	reg = TRANSCONF(pipe);
1285
	val = I915_READ(reg);
1286
	val &= ~TRANS_ENABLE;
1287
	I915_WRITE(reg, val);
1288
	/* wait for PCH transcoder off, transcoder state */
1289
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1290
		DRM_ERROR("failed to disable transcoder\n");
1291
}
1292
 
1293
/**
1294
 * intel_enable_pipe - enable a pipe, asserting requirements
1295
 * @dev_priv: i915 private structure
1296
 * @pipe: pipe to enable
1297
 * @pch_port: on ILK+, is this pipe driving a PCH port or not
1298
 *
1299
 * Enable @pipe, making sure that various hardware specific requirements
1300
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1301
 *
1302
 * @pipe should be %PIPE_A or %PIPE_B.
1303
 *
1304
 * Will wait until the pipe is actually running (i.e. first vblank) before
1305
 * returning.
1306
 */
1307
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1308
			      bool pch_port)
1309
{
1310
	int reg;
1311
	u32 val;
1312
 
1313
	/*
1314
	 * A pipe without a PLL won't actually be able to drive bits from
1315
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1316
	 * need the check.
1317
	 */
1318
	if (!HAS_PCH_SPLIT(dev_priv->dev))
1319
		assert_pll_enabled(dev_priv, pipe);
1320
	else {
1321
		if (pch_port) {
1322
			/* if driving the PCH, we need FDI enabled */
1323
			assert_fdi_rx_pll_enabled(dev_priv, pipe);
1324
			assert_fdi_tx_pll_enabled(dev_priv, pipe);
1325
		}
1326
		/* FIXME: assert CPU port conditions for SNB+ */
1327
	}
1328
 
1329
	reg = PIPECONF(pipe);
1330
	val = I915_READ(reg);
1331
	if (val & PIPECONF_ENABLE)
1332
		return;
1333
 
1334
	I915_WRITE(reg, val | PIPECONF_ENABLE);
1335
	intel_wait_for_vblank(dev_priv->dev, pipe);
1336
}
1337
 
1338
/**
1339
 * intel_disable_pipe - disable a pipe, asserting requirements
1340
 * @dev_priv: i915 private structure
1341
 * @pipe: pipe to disable
1342
 *
1343
 * Disable @pipe, making sure that various hardware specific requirements
1344
 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1345
 *
1346
 * @pipe should be %PIPE_A or %PIPE_B.
1347
 *
1348
 * Will wait until the pipe has shut down before returning.
1349
 */
1350
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1351
			       enum pipe pipe)
1352
{
1353
	int reg;
1354
	u32 val;
1355
 
1356
	/*
1357
	 * Make sure planes won't keep trying to pump pixels to us,
1358
	 * or we might hang the display.
1359
	 */
1360
	assert_planes_disabled(dev_priv, pipe);
1361
 
1362
	/* Don't disable pipe A or pipe A PLLs if needed */
1363
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1364
		return;
1365
 
1366
	reg = PIPECONF(pipe);
1367
	val = I915_READ(reg);
1368
	if ((val & PIPECONF_ENABLE) == 0)
1369
		return;
1370
 
1371
	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
1372
	intel_wait_for_pipe_off(dev_priv->dev, pipe);
1373
}
1374
 
1375
/*
1376
 * Plane regs are double buffered, going from enabled->disabled needs a
1377
 * trigger in order to latch.  The display address reg provides this.
1378
 */
1379
static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
1380
				      enum plane plane)
1381
{
1382
	I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1383
	I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1384
}
1385
 
1386
/**
1387
 * intel_enable_plane - enable a display plane on a given pipe
1388
 * @dev_priv: i915 private structure
1389
 * @plane: plane to enable
1390
 * @pipe: pipe being fed
1391
 *
1392
 * Enable @plane on @pipe, making sure that @pipe is running first.
1393
 */
1394
static void intel_enable_plane(struct drm_i915_private *dev_priv,
1395
			       enum plane plane, enum pipe pipe)
1396
{
1397
	int reg;
1398
	u32 val;
1399
 
1400
	/* If the pipe isn't enabled, we can't pump pixels and may hang */
1401
	assert_pipe_enabled(dev_priv, pipe);
1402
 
1403
	reg = DSPCNTR(plane);
1404
	val = I915_READ(reg);
1405
	if (val & DISPLAY_PLANE_ENABLE)
1406
		return;
1407
 
1408
	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
1409
	intel_flush_display_plane(dev_priv, plane);
1410
	intel_wait_for_vblank(dev_priv->dev, pipe);
1411
}
1412
 
1413
/**
1414
 * intel_disable_plane - disable a display plane
1415
 * @dev_priv: i915 private structure
1416
 * @plane: plane to disable
1417
 * @pipe: pipe consuming the data
1418
 *
1419
 * Disable @plane; should be an independent operation.
1420
 */
1421
static void intel_disable_plane(struct drm_i915_private *dev_priv,
1422
				enum plane plane, enum pipe pipe)
1423
{
1424
	int reg;
1425
	u32 val;
1426
 
1427
	reg = DSPCNTR(plane);
1428
	val = I915_READ(reg);
1429
	if ((val & DISPLAY_PLANE_ENABLE) == 0)
1430
		return;
1431
 
1432
	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
1433
	intel_flush_display_plane(dev_priv, plane);
1434
	intel_wait_for_vblank(dev_priv->dev, pipe);
1435
}
1436
 
1437
/* Turn off the PCH DP port at @reg if it is currently driving @pipe. */
static void disable_pch_dp(struct drm_i915_private *dev_priv,
			   enum pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);

	if (!dp_pipe_enabled(dev_priv, pipe, port_sel, val))
		return;

	DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
	I915_WRITE(reg, val & ~DP_PORT_EN);
}
1446
 
1447
static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1448
			     enum pipe pipe, int reg)
1449
{
1450
	u32 val = I915_READ(reg);
1451
	if (hdmi_pipe_enabled(dev_priv, val, pipe)) {
1452
		DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
1453
			      reg, pipe);
1454
		I915_WRITE(reg, val & ~PORT_ENABLE);
1455
	}
1456
}
1457
 
1458
/* Disable any ports connected to this transcoder */
1459
static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1460
				    enum pipe pipe)
1461
{
1462
	u32 reg, val;
1463
 
1464
	val = I915_READ(PCH_PP_CONTROL);
1465
	I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
1466
 
1467
	disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1468
	disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1469
	disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1470
 
1471
	reg = PCH_ADPA;
1472
	val = I915_READ(reg);
1473
	if (adpa_pipe_enabled(dev_priv, val, pipe))
1474
		I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
1475
 
1476
	reg = PCH_LVDS;
1477
	val = I915_READ(reg);
1478
	if (lvds_pipe_enabled(dev_priv, val, pipe)) {
1479
		DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
1480
		I915_WRITE(reg, val & ~LVDS_PORT_EN);
1481
		POSTING_READ(reg);
1482
		udelay(100);
1483
	}
1484
 
1485
	disable_pch_hdmi(dev_priv, pipe, HDMIB);
1486
	disable_pch_hdmi(dev_priv, pipe, HDMIC);
1487
	disable_pch_hdmi(dev_priv, pipe, HDMID);
1488
}
1489
 
1490
static void i8xx_disable_fbc(struct drm_device *dev)
1491
{
1492
    struct drm_i915_private *dev_priv = dev->dev_private;
1493
    u32 fbc_ctl;
1494
 
1495
    /* Disable compression */
1496
    fbc_ctl = I915_READ(FBC_CONTROL);
1497
    if ((fbc_ctl & FBC_CTL_EN) == 0)
1498
        return;
1499
 
1500
    fbc_ctl &= ~FBC_CTL_EN;
1501
    I915_WRITE(FBC_CONTROL, fbc_ctl);
1502
 
1503
    /* Wait for compressing bit to clear */
1504
    if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
1505
        DRM_DEBUG_KMS("FBC idle timed out\n");
1506
        return;
1507
    }
1508
 
1509
    DRM_DEBUG_KMS("disabled FBC\n");
1510
}
1511
 
1512
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1513
{
1514
    struct drm_device *dev = crtc->dev;
1515
    struct drm_i915_private *dev_priv = dev->dev_private;
1516
    struct drm_framebuffer *fb = crtc->fb;
1517
    struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1518
    struct drm_i915_gem_object *obj = intel_fb->obj;
1519
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1520
    int cfb_pitch;
1521
    int plane, i;
1522
    u32 fbc_ctl, fbc_ctl2;
1523
 
1524
    cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
1525
    if (fb->pitch < cfb_pitch)
1526
        cfb_pitch = fb->pitch;
1527
 
1528
    /* FBC_CTL wants 64B units */
1529
    cfb_pitch = (cfb_pitch / 64) - 1;
1530
    plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
1531
 
1532
    /* Clear old tags */
1533
    for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
1534
        I915_WRITE(FBC_TAG + (i * 4), 0);
1535
 
1536
    /* Set it up... */
1537
    fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
1538
    fbc_ctl2 |= plane;
1539
    I915_WRITE(FBC_CONTROL2, fbc_ctl2);
1540
    I915_WRITE(FBC_FENCE_OFF, crtc->y);
1541
 
1542
    /* enable it... */
1543
    fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
1544
    if (IS_I945GM(dev))
1545
        fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
1546
    fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
1547
    fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
1548
    fbc_ctl |= obj->fence_reg;
1549
    I915_WRITE(FBC_CONTROL, fbc_ctl);
1550
 
1551
    DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
1552
              cfb_pitch, crtc->y, intel_crtc->plane);
1553
}
1554
 
1555
static bool i8xx_fbc_enabled(struct drm_device *dev)
1556
{
1557
    struct drm_i915_private *dev_priv = dev->dev_private;
1558
 
1559
    return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
1560
}
1561
 
1562
static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1563
{
1564
    struct drm_device *dev = crtc->dev;
1565
    struct drm_i915_private *dev_priv = dev->dev_private;
1566
    struct drm_framebuffer *fb = crtc->fb;
1567
    struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1568
    struct drm_i915_gem_object *obj = intel_fb->obj;
1569
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1570
    int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1571
    unsigned long stall_watermark = 200;
1572
    u32 dpfc_ctl;
1573
 
1574
    dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
1575
    dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
1576
    I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
1577
 
1578
    I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1579
           (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1580
           (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1581
    I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
1582
 
1583
    /* enable it... */
1584
    I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
1585
 
1586
    DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1587
}
1588
 
1589
static void g4x_disable_fbc(struct drm_device *dev)
1590
{
1591
    struct drm_i915_private *dev_priv = dev->dev_private;
1592
    u32 dpfc_ctl;
1593
 
1594
    /* Disable compression */
1595
    dpfc_ctl = I915_READ(DPFC_CONTROL);
1596
    if (dpfc_ctl & DPFC_CTL_EN) {
1597
        dpfc_ctl &= ~DPFC_CTL_EN;
1598
        I915_WRITE(DPFC_CONTROL, dpfc_ctl);
1599
 
1600
        DRM_DEBUG_KMS("disabled FBC\n");
1601
    }
1602
}
1603
 
1604
static bool g4x_fbc_enabled(struct drm_device *dev)
1605
{
1606
    struct drm_i915_private *dev_priv = dev->dev_private;
1607
 
1608
    return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
1609
}
1610
 
1611
static void sandybridge_blit_fbc_update(struct drm_device *dev)
1612
{
1613
	struct drm_i915_private *dev_priv = dev->dev_private;
1614
	u32 blt_ecoskpd;
1615
 
1616
	/* Make sure blitter notifies FBC of writes */
1617
	gen6_gt_force_wake_get(dev_priv);
1618
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
1619
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
1620
		GEN6_BLITTER_LOCK_SHIFT;
1621
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1622
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
1623
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1624
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
1625
			 GEN6_BLITTER_LOCK_SHIFT);
1626
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1627
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
1628
	gen6_gt_force_wake_put(dev_priv);
1629
}
1630
 
1631
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1632
{
1633
    struct drm_device *dev = crtc->dev;
1634
    struct drm_i915_private *dev_priv = dev->dev_private;
1635
    struct drm_framebuffer *fb = crtc->fb;
1636
    struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1637
    struct drm_i915_gem_object *obj = intel_fb->obj;
1638
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1639
    int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1640
    unsigned long stall_watermark = 200;
1641
    u32 dpfc_ctl;
1642
 
1643
    dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1644
    dpfc_ctl &= DPFC_RESERVED;
1645
    dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
1646
    /* Set persistent mode for front-buffer rendering, ala X. */
1647
    dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
1648
    dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
1649
    I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
1650
 
1651
    I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1652
           (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1653
           (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1654
    I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
1655
    I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
1656
    /* enable it... */
1657
    I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
1658
 
1659
    if (IS_GEN6(dev)) {
1660
        I915_WRITE(SNB_DPFC_CTL_SA,
1661
               SNB_CPU_FENCE_ENABLE | obj->fence_reg);
1662
        I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
1663
        sandybridge_blit_fbc_update(dev);
1664
    }
1665
 
1666
    DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1667
}
1668
 
1669
static void ironlake_disable_fbc(struct drm_device *dev)
1670
{
1671
    struct drm_i915_private *dev_priv = dev->dev_private;
1672
    u32 dpfc_ctl;
1673
 
1674
    /* Disable compression */
1675
    dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1676
    if (dpfc_ctl & DPFC_CTL_EN) {
1677
        dpfc_ctl &= ~DPFC_CTL_EN;
1678
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
1679
 
1680
        DRM_DEBUG_KMS("disabled FBC\n");
1681
    }
1682
}
1683
 
1684
static bool ironlake_fbc_enabled(struct drm_device *dev)
1685
{
1686
    struct drm_i915_private *dev_priv = dev->dev_private;
1687
 
1688
    return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
1689
}
1690
 
1691
bool intel_fbc_enabled(struct drm_device *dev)
1692
{
1693
	struct drm_i915_private *dev_priv = dev->dev_private;
1694
 
1695
	if (!dev_priv->display.fbc_enabled)
1696
		return false;
1697
 
1698
	return dev_priv->display.fbc_enabled(dev);
1699
}
1700
 
1701
 
1702
 
1703
 
1704
 
1705
 
1706
 
1707
 
1708
 
1709
 
1710
static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1711
{
1712
	struct intel_fbc_work *work;
1713
	struct drm_device *dev = crtc->dev;
1714
	struct drm_i915_private *dev_priv = dev->dev_private;
1715
 
1716
	if (!dev_priv->display.enable_fbc)
1717
		return;
1718
 
1719
//	intel_cancel_fbc_work(dev_priv);
1720
 
1721
//	work = kzalloc(sizeof *work, GFP_KERNEL);
1722
//	if (work == NULL) {
1723
//		dev_priv->display.enable_fbc(crtc, interval);
1724
//		return;
1725
//	}
1726
 
1727
//	work->crtc = crtc;
1728
//	work->fb = crtc->fb;
1729
//	work->interval = interval;
1730
//	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
1731
 
1732
//	dev_priv->fbc_work = work;
1733
 
1734
	DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
1735
 
1736
	/* Delay the actual enabling to let pageflipping cease and the
1737
	 * display to settle before starting the compression. Note that
1738
	 * this delay also serves a second purpose: it allows for a
1739
	 * vblank to pass after disabling the FBC before we attempt
1740
	 * to modify the control registers.
1741
	 *
1742
	 * A more complicated solution would involve tracking vblanks
1743
	 * following the termination of the page-flipping sequence
1744
	 * and indeed performing the enable as a co-routine and not
1745
	 * waiting synchronously upon the vblank.
1746
	 */
1747
//	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
1748
}
1749
 
1750
void intel_disable_fbc(struct drm_device *dev)
1751
{
1752
	struct drm_i915_private *dev_priv = dev->dev_private;
1753
 
1754
//   intel_cancel_fbc_work(dev_priv);
1755
 
1756
	if (!dev_priv->display.disable_fbc)
1757
		return;
1758
 
1759
	dev_priv->display.disable_fbc(dev);
1760
	dev_priv->cfb_plane = -1;
1761
}
1762
 
1763
/**
1764
 * intel_update_fbc - enable/disable FBC as needed
1765
 * @dev: the drm_device
1766
 *
1767
 * Set up the framebuffer compression hardware at mode set time.  We
1768
 * enable it if possible:
1769
 *   - plane A only (on pre-965)
1770
 *   - no pixel mulitply/line duplication
1771
 *   - no alpha buffer discard
1772
 *   - no dual wide
1773
 *   - framebuffer <= 2048 in width, 1536 in height
1774
 *
1775
 * We can't assume that any compression will take place (worst case),
1776
 * so the compressed buffer has to be the same size as the uncompressed
1777
 * one.  It also must reside (along with the line length buffer) in
1778
 * stolen memory.
1779
 *
1780
 * We need to enable/disable FBC on a global basis.
1781
 */
1782
static void intel_update_fbc(struct drm_device *dev)
1783
{
1784
	struct drm_i915_private *dev_priv = dev->dev_private;
1785
	struct drm_crtc *crtc = NULL, *tmp_crtc;
1786
	struct intel_crtc *intel_crtc;
1787
	struct drm_framebuffer *fb;
1788
	struct intel_framebuffer *intel_fb;
1789
	struct drm_i915_gem_object *obj;
1790
 
1791
	DRM_DEBUG_KMS("\n");
1792
 
1793
	if (!i915_powersave)
1794
		return;
1795
 
1796
	if (!I915_HAS_FBC(dev))
1797
		return;
1798
 
1799
	/*
1800
	 * If FBC is already on, we just have to verify that we can
1801
	 * keep it that way...
1802
	 * Need to disable if:
1803
	 *   - more than one pipe is active
1804
	 *   - changing FBC params (stride, fence, mode)
1805
	 *   - new fb is too large to fit in compressed buffer
1806
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
1807
	 */
1808
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
1809
		if (tmp_crtc->enabled && tmp_crtc->fb) {
1810
			if (crtc) {
1811
				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
2336 Serge 1812
                dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
2327 Serge 1813
				goto out_disable;
1814
			}
1815
			crtc = tmp_crtc;
1816
		}
1817
	}
1818
 
1819
	if (!crtc || crtc->fb == NULL) {
1820
		DRM_DEBUG_KMS("no output, disabling\n");
2336 Serge 1821
        dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
2327 Serge 1822
		goto out_disable;
1823
	}
1824
 
1825
	intel_crtc = to_intel_crtc(crtc);
1826
	fb = crtc->fb;
1827
	intel_fb = to_intel_framebuffer(fb);
1828
	obj = intel_fb->obj;
1829
 
1830
	if (!i915_enable_fbc) {
1831
		DRM_DEBUG_KMS("fbc disabled per module param (default off)\n");
2336 Serge 1832
        dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
2327 Serge 1833
		goto out_disable;
1834
	}
1835
	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
1836
		DRM_DEBUG_KMS("framebuffer too large, disabling "
1837
			      "compression\n");
2336 Serge 1838
        dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
2327 Serge 1839
		goto out_disable;
1840
	}
1841
	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
1842
	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
1843
		DRM_DEBUG_KMS("mode incompatible with compression, "
1844
			      "disabling\n");
2336 Serge 1845
        dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
2327 Serge 1846
		goto out_disable;
1847
	}
1848
	if ((crtc->mode.hdisplay > 2048) ||
1849
	    (crtc->mode.vdisplay > 1536)) {
1850
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
2336 Serge 1851
        dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
2327 Serge 1852
		goto out_disable;
1853
	}
1854
	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
1855
		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
2336 Serge 1856
        dev_priv->no_fbc_reason = FBC_BAD_PLANE;
2327 Serge 1857
		goto out_disable;
1858
	}
1859
 
1860
	/* The use of a CPU fence is mandatory in order to detect writes
1861
	 * by the CPU to the scanout and trigger updates to the FBC.
1862
	 */
1863
//	if (obj->tiling_mode != I915_TILING_X ||
1864
//	    obj->fence_reg == I915_FENCE_REG_NONE) {
1865
//		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
1866
//		dev_priv->no_fbc_reason = FBC_NOT_TILED;
1867
//		goto out_disable;
1868
//	}
1869
 
1870
	/* If the kernel debugger is active, always disable compression */
1871
	if (in_dbg_master())
1872
		goto out_disable;
1873
 
1874
	/* If the scanout has not changed, don't modify the FBC settings.
1875
	 * Note that we make the fundamental assumption that the fb->obj
1876
	 * cannot be unpinned (and have its GTT offset and fence revoked)
1877
	 * without first being decoupled from the scanout and FBC disabled.
1878
	 */
1879
	if (dev_priv->cfb_plane == intel_crtc->plane &&
1880
	    dev_priv->cfb_fb == fb->base.id &&
1881
	    dev_priv->cfb_y == crtc->y)
1882
		return;
1883
 
1884
	if (intel_fbc_enabled(dev)) {
1885
		/* We update FBC along two paths, after changing fb/crtc
1886
		 * configuration (modeswitching) and after page-flipping
1887
		 * finishes. For the latter, we know that not only did
1888
		 * we disable the FBC at the start of the page-flip
1889
		 * sequence, but also more than one vblank has passed.
1890
		 *
1891
		 * For the former case of modeswitching, it is possible
1892
		 * to switch between two FBC valid configurations
1893
		 * instantaneously so we do need to disable the FBC
1894
		 * before we can modify its control registers. We also
1895
		 * have to wait for the next vblank for that to take
1896
		 * effect. However, since we delay enabling FBC we can
1897
		 * assume that a vblank has passed since disabling and
1898
		 * that we can safely alter the registers in the deferred
1899
		 * callback.
1900
		 *
1901
		 * In the scenario that we go from a valid to invalid
1902
		 * and then back to valid FBC configuration we have
1903
		 * no strict enforcement that a vblank occurred since
1904
		 * disabling the FBC. However, along all current pipe
1905
		 * disabling paths we do need to wait for a vblank at
1906
		 * some point. And we wait before enabling FBC anyway.
1907
		 */
1908
		DRM_DEBUG_KMS("disabling active FBC for update\n");
1909
		intel_disable_fbc(dev);
1910
	}
1911
 
1912
	intel_enable_fbc(crtc, 500);
1913
	return;
1914
 
1915
out_disable:
1916
	/* Multiple disables should be harmless */
1917
	if (intel_fbc_enabled(dev)) {
1918
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
1919
		intel_disable_fbc(dev);
1920
	}
1921
}
1922
 
2335 Serge 1923
/*
 * Pin a GEM object into the GTT for use as a scan-out surface.
 *
 * Alignment depends on tiling: an unfenced linear surface needs a large
 * power-of-two alignment on older parts (128KiB on Broadwater/Crestline,
 * 64KiB pre-gen4, 4KiB on gen4+), while an X-tiled surface relies on
 * pin() itself applying the fence alignment.  Y tiling is rejected for
 * scan-out.
 *
 * mm.interruptible is forced off for the duration of the pin so the wait
 * cannot be aborted by a signal, and restored on every exit path.
 *
 * NOTE(port): the fence installation step is commented out in this
 * KolibriOS port, so the err_unpin label is currently never jumped to.
 *
 * Returns 0 on success or a negative error code.
 */
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct intel_ring_buffer *pipelined)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 alignment;
	int ret;

	switch (obj->tiling_mode) {
	case I915_TILING_NONE:
		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
			alignment = 128 * 1024;
		else if (INTEL_INFO(dev)->gen >= 4)
			alignment = 4 * 1024;
		else
			alignment = 64 * 1024;
		break;
	case I915_TILING_X:
		/* pin() will align the object as required by fence */
		alignment = 0;
		break;
	case I915_TILING_Y:
		/* FIXME: Is this true? */
		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
		return -EINVAL;
	default:
		BUG();
	}

	/* Make the pin non-interruptible: a signal here would leave the
	 * display update half-done. */
	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
	if (ret)
		goto err_interruptible;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.  For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
//	if (obj->tiling_mode != I915_TILING_NONE) {
//		ret = i915_gem_object_get_fence(obj, pipelined);
//		if (ret)
//			goto err_unpin;
//	}

	dev_priv->mm.interruptible = true;
	return 0;

err_unpin:
//	i915_gem_object_unpin(obj);
err_interruptible:
	dev_priv->mm.interruptible = true;
	return ret;
}
2327 Serge 1978
 
1979
/*
 * Program the primary plane registers (pre-Ironlake path) to scan out
 * @fb at panning offset (@x, @y).
 *
 * Writes DSPCNTR (pixel format / tiling), DSPSTRIDE and the base-address
 * registers; on gen4+ the base is split into DSPSURF + DSPTILEOFF +
 * DSPADDR, older parts take a single combined address in DSPADDR.
 *
 * Returns 0 on success, -EINVAL for an unknown plane or pixel format.
 */
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                 int x, int y)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    struct intel_framebuffer *intel_fb;
    struct drm_i915_gem_object *obj;
    int plane = intel_crtc->plane;
    unsigned long Start, Offset;
    u32 dspcntr;
    u32 reg;

    /* Only the two primary planes (A/B) can be programmed here. */
    switch (plane) {
    case 0:
    case 1:
        break;
    default:
        DRM_ERROR("Can't update plane %d in SAREA\n", plane);
        return -EINVAL;
    }

    intel_fb = to_intel_framebuffer(fb);
    obj = intel_fb->obj;

    reg = DSPCNTR(plane);
    dspcntr = I915_READ(reg);
    /* Mask out pixel format bits in case we change it */
    dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
    switch (fb->bits_per_pixel) {
    case 8:
        dspcntr |= DISPPLANE_8BPP;
        break;
    case 16:
        if (fb->depth == 15)
            dspcntr |= DISPPLANE_15_16BPP;
        else
            dspcntr |= DISPPLANE_16BPP;
        break;
    case 24:
    case 32:
        dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
        break;
    default:
        DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
        return -EINVAL;
    }
    /* Tiling control only exists in DSPCNTR on gen4+. */
    if (INTEL_INFO(dev)->gen >= 4) {
        if (obj->tiling_mode != I915_TILING_NONE)
            dspcntr |= DISPPLANE_TILED;
        else
            dspcntr &= ~DISPPLANE_TILED;
    }

    I915_WRITE(reg, dspcntr);

    /* Base of the object in the GTT plus byte offset of the (x,y) pan. */
    Start = obj->gtt_offset;
    Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);

    DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
              Start, Offset, x, y, fb->pitch);
    I915_WRITE(DSPSTRIDE(plane), fb->pitch);
    if (INTEL_INFO(dev)->gen >= 4) {
        /* gen4+: surface base, tile offset and linear offset are
         * separate registers; DSPSURF latches the update. */
        I915_WRITE(DSPSURF(plane), Start);
        I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
        I915_WRITE(DSPADDR(plane), Offset);
    } else
        I915_WRITE(DSPADDR(plane), Start + Offset);
    POSTING_READ(reg);

    return 0;
}
2051
 
2052
/*
 * Program the primary plane registers (Ironlake+ path) to scan out
 * @fb at panning offset (@x, @y).
 *
 * Stricter than the i9xx path: 16bpp must be depth 16, and 24/32bpp must
 * be depth 24 or 30.  Trickle feed is force-disabled as required on
 * these parts.
 *
 * NOTE(port): the tiled-scanout case is commented out, so DISPPLANE_TILED
 * is always cleared in this KolibriOS port.
 *
 * Returns 0 on success, -EINVAL for an unknown plane or unsupported
 * format/depth combination.
 */
static int ironlake_update_plane(struct drm_crtc *crtc,
                 struct drm_framebuffer *fb, int x, int y)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    struct intel_framebuffer *intel_fb;
    struct drm_i915_gem_object *obj;
    int plane = intel_crtc->plane;
    unsigned long Start, Offset;
    u32 dspcntr;
    u32 reg;

    /* Only the two primary planes (A/B) can be programmed here. */
    switch (plane) {
    case 0:
    case 1:
        break;
    default:
        DRM_ERROR("Can't update plane %d in SAREA\n", plane);
        return -EINVAL;
    }

    intel_fb = to_intel_framebuffer(fb);
    obj = intel_fb->obj;

    reg = DSPCNTR(plane);
    dspcntr = I915_READ(reg);
    /* Mask out pixel format bits in case we change it */
    dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
    switch (fb->bits_per_pixel) {
    case 8:
        dspcntr |= DISPPLANE_8BPP;
        break;
    case 16:
        if (fb->depth != 16)
            return -EINVAL;

        dspcntr |= DISPPLANE_16BPP;
        break;
    case 24:
    case 32:
        if (fb->depth == 24)
            dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
        else if (fb->depth == 30)
            dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
        else
            return -EINVAL;
        break;
    default:
        DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
        return -EINVAL;
    }

//    if (obj->tiling_mode != I915_TILING_NONE)
//        dspcntr |= DISPPLANE_TILED;
//    else
        dspcntr &= ~DISPPLANE_TILED;

    /* must disable */
    dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

    I915_WRITE(reg, dspcntr);

    /* Base of the object in the GTT plus byte offset of the (x,y) pan. */
    Start = obj->gtt_offset;
    Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);

    DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
              Start, Offset, x, y, fb->pitch);
	I915_WRITE(DSPSTRIDE(plane), fb->pitch);
	I915_WRITE(DSPSURF(plane), Start);
	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
	I915_WRITE(DSPADDR(plane), Offset);
	POSTING_READ(reg);

    return 0;
}
2128
 
2129
/* Assume fb object is pinned & idle & fenced and just update base pointers */
2130
static int
2131
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2132
			   int x, int y, enum mode_set_atomic state)
2133
{
2134
	struct drm_device *dev = crtc->dev;
2135
	struct drm_i915_private *dev_priv = dev->dev_private;
2136
	int ret;
2137
 
2336 Serge 2138
    ENTER();
2139
 
2327 Serge 2140
	ret = dev_priv->display.update_plane(crtc, fb, x, y);
2141
	if (ret)
2336 Serge 2142
    {
2143
        LEAVE();
2327 Serge 2144
		return ret;
2336 Serge 2145
    };
2327 Serge 2146
 
2147
	intel_update_fbc(dev);
2148
	intel_increase_pllclock(crtc);
2336 Serge 2149
    LEAVE();
2327 Serge 2150
 
2151
	return 0;
2152
}
2153
 
2154
static int
2155
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2156
		    struct drm_framebuffer *old_fb)
2157
{
2158
	struct drm_device *dev = crtc->dev;
2159
	struct drm_i915_master_private *master_priv;
2160
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2336 Serge 2161
    int ret = 0;
2327 Serge 2162
 
2336 Serge 2163
    ENTER();
2164
 
2327 Serge 2165
	/* no fb bound */
2166
	if (!crtc->fb) {
2167
		DRM_ERROR("No FB bound\n");
2168
		return 0;
2169
	}
2170
 
2171
	switch (intel_crtc->plane) {
2172
	case 0:
2173
	case 1:
2174
		break;
2175
	default:
2176
		DRM_ERROR("no plane for crtc\n");
2177
		return -EINVAL;
2178
	}
2179
 
2180
	mutex_lock(&dev->struct_mutex);
2181
 
2336 Serge 2182
    ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
2183
					 LEAVE_ATOMIC_MODE_SET);
2327 Serge 2184
 
2336 Serge 2185
    dbgprintf("set base atomic done ret= %d\n", ret);
2327 Serge 2186
 
2187
	if (ret) {
2188
//       i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
2189
		mutex_unlock(&dev->struct_mutex);
2190
		DRM_ERROR("failed to update base address\n");
2336 Serge 2191
        LEAVE();
2327 Serge 2192
		return ret;
2193
	}
2194
 
2336 Serge 2195
	mutex_unlock(&dev->struct_mutex);
2327 Serge 2196
 
2336 Serge 2197
 
2198
    LEAVE();
2199
    return 0;
2200
 
2330 Serge 2201
#if 0
2336 Serge 2202
 
2330 Serge 2203
	if (!dev->primary->master)
2336 Serge 2204
    {
2205
        LEAVE();
2330 Serge 2206
		return 0;
2336 Serge 2207
    };
2327 Serge 2208
 
2330 Serge 2209
	master_priv = dev->primary->master->driver_priv;
2210
	if (!master_priv->sarea_priv)
2336 Serge 2211
    {
2212
        LEAVE();
2330 Serge 2213
		return 0;
2336 Serge 2214
    };
2327 Serge 2215
 
2330 Serge 2216
	if (intel_crtc->pipe) {
2217
		master_priv->sarea_priv->pipeB_x = x;
2218
		master_priv->sarea_priv->pipeB_y = y;
2219
	} else {
2220
		master_priv->sarea_priv->pipeA_x = x;
2221
		master_priv->sarea_priv->pipeA_y = y;
2222
	}
2336 Serge 2223
    LEAVE();
2224
 
2225
	return 0;
2330 Serge 2226
#endif
2336 Serge 2227
 
2327 Serge 2228
}
2229
 
2230
/*
 * Select the eDP PLL frequency on Ironlake for the given link @clock
 * (in kHz): 160MHz for links below 200000 kHz, otherwise 270MHz.
 *
 * The 160MHz case requires the documented multi-register workaround
 * sequence; the register writes below must stay in this exact order.
 */
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (clock < 200000) {
		u32 temp;
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		/* workaround for 160Mhz:
		   1) program 0x4600c bits 15:0 = 0x8124
		   2) program 0x46010 bit 0 = 1
		   3) program 0x46034 bit 24 = 1
		   4) program 0x64000 bit 14 = 1
		   */
		temp = I915_READ(0x4600c);
		temp &= 0xffff0000;
		I915_WRITE(0x4600c, temp | 0x8124);

		temp = I915_READ(0x46010);
		I915_WRITE(0x46010, temp | 1);

		temp = I915_READ(0x46034);
		I915_WRITE(0x46034, temp | (1 << 24));
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
	}
	I915_WRITE(DP_A, dpa_ctl);

	/* Give the PLL time to settle before it is used. */
	POSTING_READ(DP_A);
	udelay(500);
}
2266
 
2267
/*
 * Switch the FDI link out of the training patterns into normal pixel
 * transport once link training has completed.
 *
 * The TX side uses different train-control encodings on Ivy Bridge
 * (FDI_LINK_TRAIN_*_IVB) than on earlier parts; the RX side differs
 * between CPT and IBX PCHes.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
2307
 
2308
static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
2309
{
2310
	struct drm_i915_private *dev_priv = dev->dev_private;
2311
	u32 flags = I915_READ(SOUTH_CHICKEN1);
2312
 
2313
	flags |= FDI_PHASE_SYNC_OVR(pipe);
2314
	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
2315
	flags |= FDI_PHASE_SYNC_EN(pipe);
2316
	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
2317
	POSTING_READ(SOUTH_CHICKEN1);
2318
}
2319
 
2320
/* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Two-phase training: pattern 1 until the receiver reports bit lock,
 * then pattern 2 until it reports symbol lock.  Each lock bit is polled
 * up to 5 times in FDI_RX_IIR and acknowledged by writing it back.
 * Training failure is logged but not treated as fatal.
 */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;
    u32 reg, temp, tries;

    /* FDI needs bits from pipe & plane first */
    assert_pipe_enabled(dev_priv, pipe);
    assert_plane_enabled(dev_priv, plane);

    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
       for train result */
    reg = FDI_RX_IMR(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_RX_SYMBOL_LOCK;
    temp &= ~FDI_RX_BIT_LOCK;
    I915_WRITE(reg, temp);
    I915_READ(reg);
    udelay(150);

    /* enable CPU FDI TX and PCH FDI RX */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    /* Bits 21:19 hold the lane count (lanes - 1). */
    temp &= ~(7 << 19);
    temp |= (intel_crtc->fdi_lanes - 1) << 19;
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_1;
    I915_WRITE(reg, temp | FDI_TX_ENABLE);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_1;
    I915_WRITE(reg, temp | FDI_RX_ENABLE);

    POSTING_READ(reg);
    udelay(150);

    /* Ironlake workaround, enable clock pointer after FDI enable*/
    if (HAS_PCH_IBX(dev)) {
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
               FDI_RX_PHASE_SYNC_POINTER_EN);
    }

    /* Poll for bit lock; write the bit back to acknowledge it. */
    reg = FDI_RX_IIR(pipe);
    for (tries = 0; tries < 5; tries++) {
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if ((temp & FDI_RX_BIT_LOCK)) {
            DRM_DEBUG_KMS("FDI train 1 done.\n");
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
            break;
        }
    }
    if (tries == 5)
        DRM_ERROR("FDI train 1 fail!\n");

    /* Train 2 */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_2;
    I915_WRITE(reg, temp);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_2;
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    udelay(150);

    /* Poll for symbol lock; write the bit back to acknowledge it. */
    reg = FDI_RX_IIR(pipe);
    for (tries = 0; tries < 5; tries++) {
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if (temp & FDI_RX_SYMBOL_LOCK) {
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
            DRM_DEBUG_KMS("FDI train 2 done.\n");
            break;
        }
    }
    if (tries == 5)
        DRM_ERROR("FDI train 2 fail!\n");

    DRM_DEBUG_KMS("FDI train done\n");

}
2416
 
2417
/* Voltage-swing / pre-emphasis settings tried in order (one per retry)
 * by the SNB and IVB FDI link-training loops below. */
static const int snb_b_fdi_train_param [] = {
    FDI_LINK_TRAIN_400MV_0DB_SNB_B,
    FDI_LINK_TRAIN_400MV_6DB_SNB_B,
    FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
    FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
2423
 
2424
/* The FDI link training functions for SNB/Cougarpoint. */
/*
 * Like the Ironlake trainer, but each training phase retries with the
 * four voltage/pre-emphasis levels from snb_b_fdi_train_param before
 * giving up, and the RX train-pattern encoding differs on CPT PCHes.
 */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    u32 reg, temp, i;

    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
       for train result */
    reg = FDI_RX_IMR(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_RX_SYMBOL_LOCK;
    temp &= ~FDI_RX_BIT_LOCK;
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    udelay(150);

    /* enable CPU FDI TX and PCH FDI RX */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    /* Bits 21:19 hold the lane count (lanes - 1). */
    temp &= ~(7 << 19);
    temp |= (intel_crtc->fdi_lanes - 1) << 19;
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_1;
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
    /* SNB-B */
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
    I915_WRITE(reg, temp | FDI_TX_ENABLE);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    if (HAS_PCH_CPT(dev)) {
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
        temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
    } else {
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_1;
    }
    I915_WRITE(reg, temp | FDI_RX_ENABLE);

    POSTING_READ(reg);
    udelay(150);

    if (HAS_PCH_CPT(dev))
        cpt_phase_pointer_enable(dev, pipe);

    /* Retry pattern 1 with each voltage/emphasis level until bit lock. */
    for (i = 0; i < 4; i++ ) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(500);

        reg = FDI_RX_IIR(pipe);
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if (temp & FDI_RX_BIT_LOCK) {
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
            DRM_DEBUG_KMS("FDI train 1 done.\n");
            break;
        }
    }
    if (i == 4)
        DRM_ERROR("FDI train 1 fail!\n");

    /* Train 2 */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_2;
    if (IS_GEN6(dev)) {
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        /* SNB-B */
        temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
    }
    I915_WRITE(reg, temp);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    if (HAS_PCH_CPT(dev)) {
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
        temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
    } else {
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_2;
    }
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    udelay(150);

    /* Retry pattern 2 with each voltage/emphasis level until symbol lock. */
    for (i = 0; i < 4; i++ ) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(500);

        reg = FDI_RX_IIR(pipe);
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if (temp & FDI_RX_SYMBOL_LOCK) {
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
            DRM_DEBUG_KMS("FDI train 2 done.\n");
            break;
        }
    }
    if (i == 4)
        DRM_ERROR("FDI train 2 fail!\n");

    DRM_DEBUG_KMS("FDI train done.\n");
}
2547
 
2548
/* Manual link training for Ivy Bridge A0 parts */
/*
 * IVB A0 silicon cannot use auto-training, so drive the same
 * pattern-1/pattern-2 sequence by hand, using the IVB-specific TX
 * train-control encoding and the CPT RX encoding, with the same four
 * voltage/pre-emphasis retry levels as the SNB trainer.
 */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    u32 reg, temp, i;

    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
       for train result */
    reg = FDI_RX_IMR(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_RX_SYMBOL_LOCK;
    temp &= ~FDI_RX_BIT_LOCK;
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    udelay(150);

    /* enable CPU FDI TX and PCH FDI RX */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    /* Bits 21:19 hold the lane count (lanes - 1). */
    temp &= ~(7 << 19);
    temp |= (intel_crtc->fdi_lanes - 1) << 19;
    temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
    temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
    I915_WRITE(reg, temp | FDI_TX_ENABLE);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_AUTO;
    temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
    temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
    I915_WRITE(reg, temp | FDI_RX_ENABLE);

    POSTING_READ(reg);
    udelay(150);

    if (HAS_PCH_CPT(dev))
        cpt_phase_pointer_enable(dev, pipe);

    /* Retry pattern 1 with each voltage/emphasis level until bit lock. */
    for (i = 0; i < 4; i++ ) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(500);

        reg = FDI_RX_IIR(pipe);
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        /* Double-check the IIR in case the lock arrived between reads. */
        if (temp & FDI_RX_BIT_LOCK ||
            (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
            DRM_DEBUG_KMS("FDI train 1 done.\n");
            break;
        }
    }
    if (i == 4)
        DRM_ERROR("FDI train 1 fail!\n");

    /* Train 2 */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE_IVB;
    temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
    I915_WRITE(reg, temp);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
    temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    udelay(150);

    /* Retry pattern 2 with each voltage/emphasis level until symbol lock. */
    for (i = 0; i < 4; i++ ) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(500);

        reg = FDI_RX_IIR(pipe);
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if (temp & FDI_RX_SYMBOL_LOCK) {
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
            DRM_DEBUG_KMS("FDI train 2 done.\n");
            break;
        }
    }
    if (i == 4)
        DRM_ERROR("FDI train 2 fail!\n");

    DRM_DEBUG_KMS("FDI train done.\n");
}
2659
 
2660
/*
 * Enable the FDI PLLs for @crtc's pipe: program the TU size so error
 * detection works, bring up the PCH FDI RX PLL (with lane count and BPC
 * taken from PIPECONF), switch the RX from Rawclk to PCDclk, and ensure
 * the CPU FDI TX PLL is on (it stays always-on on Ironlake).
 *
 * The udelay()s after each POSTING_READ give the PLLs their documented
 * warmup time; the sequence order must not change.
 */
static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* Write the TU size bits so error detection works */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~((0x7 << 19) | (0x7 << 16));
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
2700
 
2701
static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
2702
{
2703
	struct drm_i915_private *dev_priv = dev->dev_private;
2704
	u32 flags = I915_READ(SOUTH_CHICKEN1);
2705
 
2706
	flags &= ~(FDI_PHASE_SYNC_EN(pipe));
2707
	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
2708
	flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
2709
	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
2710
	POSTING_READ(SOUTH_CHICKEN1);
2711
}
2712
static void ironlake_fdi_disable(struct drm_crtc *crtc)
2713
{
2714
	struct drm_device *dev = crtc->dev;
2715
	struct drm_i915_private *dev_priv = dev->dev_private;
2716
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2717
	int pipe = intel_crtc->pipe;
2718
	u32 reg, temp;
2719
 
2720
	/* disable CPU FDI tx and PCH FDI rx */
2721
	reg = FDI_TX_CTL(pipe);
2722
	temp = I915_READ(reg);
2723
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2724
	POSTING_READ(reg);
2725
 
2726
	reg = FDI_RX_CTL(pipe);
2727
	temp = I915_READ(reg);
2728
	temp &= ~(0x7 << 16);
2729
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2730
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2731
 
2732
	POSTING_READ(reg);
2733
	udelay(100);
2734
 
2735
	/* Ironlake workaround, disable clock pointer after downing FDI */
2736
	if (HAS_PCH_IBX(dev)) {
2737
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2738
		I915_WRITE(FDI_RX_CHICKEN(pipe),
2739
			   I915_READ(FDI_RX_CHICKEN(pipe) &
2740
				     ~FDI_RX_PHASE_SYNC_POINTER_EN));
2741
	} else if (HAS_PCH_CPT(dev)) {
2742
		cpt_phase_pointer_disable(dev, pipe);
2743
	}
2744
 
2745
	/* still set train pattern 1 */
2746
	reg = FDI_TX_CTL(pipe);
2747
	temp = I915_READ(reg);
2748
	temp &= ~FDI_LINK_TRAIN_NONE;
2749
	temp |= FDI_LINK_TRAIN_PATTERN_1;
2750
	I915_WRITE(reg, temp);
2751
 
2752
	reg = FDI_RX_CTL(pipe);
2753
	temp = I915_READ(reg);
2754
	if (HAS_PCH_CPT(dev)) {
2755
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2756
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2757
	} else {
2758
		temp &= ~FDI_LINK_TRAIN_NONE;
2759
		temp |= FDI_LINK_TRAIN_PATTERN_1;
2760
	}
2761
	/* BPC in FDI rx is consistent with that in PIPECONF */
2762
	temp &= ~(0x07 << 16);
2763
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2764
	I915_WRITE(reg, temp);
2765
 
2766
	POSTING_READ(reg);
2767
	udelay(100);
2768
}
2769
 
2770
/*
2771
 * When we disable a pipe, we need to clear any pending scanline wait events
2772
 * to avoid hanging the ring, which we assume we are waiting on.
2773
 */
2774
static void intel_clear_scanline_wait(struct drm_device *dev)
2775
{
2776
	struct drm_i915_private *dev_priv = dev->dev_private;
2777
	struct intel_ring_buffer *ring;
2778
	u32 tmp;
2779
 
2780
	if (IS_GEN2(dev))
2781
		/* Can't break the hang on i8xx */
2782
		return;
2783
 
2784
	ring = LP_RING(dev_priv);
2785
	tmp = I915_READ_CTL(ring);
2786
	if (tmp & RING_WAIT)
2787
		I915_WRITE_CTL(ring, tmp);
2788
}
2789
 
2790
/*
 * Wait until any page flip still pending against the crtc's current
 * framebuffer object has completed.
 *
 * NOTE(port): the actual wait_event on pending_flip_queue is commented
 * out in this KolibriOS port, so this currently performs only the
 * lookups and returns immediately.
 */
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_i915_gem_object *obj;
	struct drm_i915_private *dev_priv;

	if (crtc->fb == NULL)
		return;

	obj = to_intel_framebuffer(crtc->fb)->obj;
	dev_priv = crtc->dev->dev_private;
//	wait_event(dev_priv->pending_flip_queue,
//		   atomic_read(&obj->pending_flip) == 0);
}
2803
 
2804
static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2805
{
2806
	struct drm_device *dev = crtc->dev;
2807
	struct drm_mode_config *mode_config = &dev->mode_config;
2808
	struct intel_encoder *encoder;
2809
 
2810
	/*
2811
	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2812
	 * must be driven by its own crtc; no sharing is possible.
2813
	 */
2814
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
2815
		if (encoder->base.crtc != crtc)
2816
			continue;
2817
 
2818
		switch (encoder->type) {
2819
		case INTEL_OUTPUT_EDP:
2820
			if (!intel_encoder_is_pch_edp(&encoder->base))
2821
				return false;
2822
			continue;
2823
		}
2824
	}
2825
 
2826
	return true;
2827
}
2828
 
2829
/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	intel_enable_pch_pll(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		/* Be sure PCH DPLL SEL is set: on CougarPoint the transcoder
		 * must explicitly be routed to a PCH DPLL.  Only set the
		 * enable/select bits if they are not already set. */
		temp = I915_READ(PCH_DPLL_SEL);
		if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0)
			temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
		else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0)
			temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	/* Copy the CPU pipe timings into the matching PCH transcoder. */
	I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
	I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
	I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));

	I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
	I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));

	/* Switch FDI from the training pattern to normal link operation. */
	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) &&
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		/* Mirror the pipe's bits-per-color field into TRANS_DP_CTL. */
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= (TRANS_DP_OUTPUT_ENABLE |
			 TRANS_DP_ENH_FRAMING);
		temp |= bpc << 9; /* same format but at 11:9 */

		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* Route the transcoder to the DP port actually attached. */
		switch (intel_trans_dp_port_sel(crtc)) {
		case PCH_DP_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		}

		I915_WRITE(reg, temp);
	}

	intel_enable_transcoder(dev_priv, pipe);
}
2911
 
2912
/* Full power-up sequence for a crtc on IronLake+ (PCH split) hardware.
 * The ordering (PLL/FDI -> panel fitter -> LUT -> pipe -> plane -> PCH)
 * is mandated by the modeset sequence and must not be reordered. */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;
    u32 temp;
    bool is_pch_port;

    /* Idempotent: nothing to do if the crtc is already running. */
    if (intel_crtc->active)
        return;

    intel_crtc->active = true;
    intel_update_watermarks(dev);

    /* LVDS port on the PCH must be on before the pipe is enabled. */
    if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
        temp = I915_READ(PCH_LVDS);
        if ((temp & LVDS_PORT_EN) == 0)
            I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
    }

    is_pch_port = intel_crtc_driving_pch(crtc);

    /* FDI is only needed when the output goes through the PCH. */
    if (is_pch_port)
        ironlake_fdi_pll_enable(crtc);
    else
        ironlake_fdi_disable(crtc);

    /* Enable panel fitting for LVDS */
    if (dev_priv->pch_pf_size &&
        (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
        /* Force use of hard-coded filter coefficients
         * as some pre-programmed values are broken,
         * e.g. x201.
         */
        I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
        I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
        I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
    }

    /*
     * On ILK+ LUT must be loaded before the pipe is running but with
     * clocks enabled
     */
    intel_crtc_load_lut(crtc);

    intel_enable_pipe(dev_priv, pipe, is_pch_port);
    intel_enable_plane(dev_priv, plane, pipe);

    /* PCH transcoder/DPLL/DP setup comes last, after the pipe is live. */
    if (is_pch_port)
        ironlake_pch_enable(crtc);

    mutex_lock(&dev->struct_mutex);
    intel_update_fbc(dev);
    mutex_unlock(&dev->struct_mutex);

    /* Cursor support is disabled in this port. */
//    intel_crtc_update_cursor(crtc, true);
}
2971
 
2972
/* Full power-down sequence for a crtc on IronLake+ hardware; the inverse
 * of ironlake_crtc_enable() and equally order-sensitive:
 * plane -> pipe -> panel fitter -> FDI -> PCH ports -> transcoder -> PLLs. */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;
    u32 reg, temp;

    /* Idempotent: nothing to do if the crtc is already off. */
    if (!intel_crtc->active)
        return;

    ENTER();   /* KolibriOS debug trace */

    intel_crtc_wait_for_pending_flips(crtc);
//    drm_vblank_off(dev, pipe);
//    intel_crtc_update_cursor(crtc, false);

    intel_disable_plane(dev_priv, plane, pipe);

    /* Stop frame-buffer compression before its plane goes away. */
    if (dev_priv->cfb_plane == plane)
        intel_disable_fbc(dev);

    intel_disable_pipe(dev_priv, pipe);

    /* Disable PF */
    I915_WRITE(PF_CTL(pipe), 0);
    I915_WRITE(PF_WIN_SZ(pipe), 0);

    ironlake_fdi_disable(crtc);

    /* This is a horrible layering violation; we should be doing this in
     * the connector/encoder ->prepare instead, but we don't always have
     * enough information there about the config to know whether it will
     * actually be necessary or just cause undesired flicker.
     */
    intel_disable_pch_ports(dev_priv, pipe);

    intel_disable_transcoder(dev_priv, pipe);

    if (HAS_PCH_CPT(dev)) {
        /* disable TRANS_DP_CTL */
        reg = TRANS_DP_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
        temp |= TRANS_DP_PORT_SEL_NONE;
        I915_WRITE(reg, temp);

        /* disable DPLL_SEL */
        temp = I915_READ(PCH_DPLL_SEL);
        switch (pipe) {
        case 0:
            temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
            break;
        case 1:
            temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
            break;
        case 2:
            /* FIXME: manage transcoder PLLs? */
            temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
            break;
        default:
            BUG(); /* wtf */
        }
        I915_WRITE(PCH_DPLL_SEL, temp);
    }

    /* disable PCH DPLL */
    intel_disable_pch_pll(dev_priv, pipe);

    /* Switch from PCDclk to Rawclk */
    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    I915_WRITE(reg, temp & ~FDI_PCDCLK);

    /* Disable CPU FDI TX PLL */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

    /* Flush the write, then give the PLL time to spin down. */
    POSTING_READ(reg);
    udelay(100);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

    /* Wait for the clocks to turn off. */
    POSTING_READ(reg);
    udelay(100);

    intel_crtc->active = false;
    intel_update_watermarks(dev);

    mutex_lock(&dev->struct_mutex);
    intel_update_fbc(dev);
    intel_clear_scanline_wait(dev);
    mutex_unlock(&dev->struct_mutex);

    LEAVE();   /* KolibriOS debug trace */

}
3074
 
3075
static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
3076
{
3077
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3078
    int pipe = intel_crtc->pipe;
3079
    int plane = intel_crtc->plane;
3080
 
3081
    /* XXX: When our outputs are all unaware of DPMS modes other than off
3082
     * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3083
     */
3084
    switch (mode) {
3085
    case DRM_MODE_DPMS_ON:
3086
    case DRM_MODE_DPMS_STANDBY:
3087
    case DRM_MODE_DPMS_SUSPEND:
3088
        DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
3089
        ironlake_crtc_enable(crtc);
3090
        break;
3091
 
3092
    case DRM_MODE_DPMS_OFF:
3093
        DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
3094
        ironlake_crtc_disable(crtc);
3095
        break;
3096
    }
3097
}
3098
 
3099
/* Turn the video overlay off when its crtc is being disabled.  Enabling is
 * deliberately left to userspace (see comment at the bottom). */
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
	if (!enable && intel_crtc->overlay) {
		struct drm_device *dev = intel_crtc->base.dev;
		struct drm_i915_private *dev_priv = dev->dev_private;

		mutex_lock(&dev->struct_mutex);
		/* Run the switch-off uninterruptibly so it can't fail midway;
		 * the call itself is stubbed out in this port. */
		dev_priv->mm.interruptible = false;
//       (void) intel_overlay_switch_off(intel_crtc->overlay);
		dev_priv->mm.interruptible = true;
		mutex_unlock(&dev->struct_mutex);
	}

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}
3116
 
3117
/* Power-up sequence for a crtc on pre-PCH (gen2-gen4) hardware:
 * PLL -> pipe -> plane -> LUT. */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;

    /* Idempotent: nothing to do if the crtc is already running. */
    if (intel_crtc->active)
        return;

    intel_crtc->active = true;
    intel_update_watermarks(dev);

    intel_enable_pll(dev_priv, pipe);
    intel_enable_pipe(dev_priv, pipe, false);
    intel_enable_plane(dev_priv, plane, pipe);

    intel_crtc_load_lut(crtc);
    intel_update_fbc(dev);

    /* Give the overlay scaler a chance to enable if it's on this pipe */
    intel_crtc_dpms_overlay(intel_crtc, true);
//    intel_crtc_update_cursor(crtc, true);
}
3142
 
3143
/* Power-down sequence for a crtc on pre-PCH (gen2-gen4) hardware; the
 * inverse ordering of i9xx_crtc_enable(): plane -> pipe -> PLL. */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;

    /* Idempotent: nothing to do if the crtc is already off. */
    if (!intel_crtc->active)
        return;

    /* Give the overlay scaler a chance to disable if it's on this pipe */
    intel_crtc_wait_for_pending_flips(crtc);
//    drm_vblank_off(dev, pipe);
    intel_crtc_dpms_overlay(intel_crtc, false);
//    intel_crtc_update_cursor(crtc, false);

    /* Stop frame-buffer compression before its plane goes away. */
    if (dev_priv->cfb_plane == plane)
        intel_disable_fbc(dev);

    intel_disable_plane(dev_priv, plane, pipe);
    intel_disable_pipe(dev_priv, pipe);
    intel_disable_pll(dev_priv, pipe);

    intel_crtc->active = false;
    intel_update_fbc(dev);
    intel_update_watermarks(dev);
    intel_clear_scanline_wait(dev);
}
3172
 
3173
static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
3174
{
3175
    /* XXX: When our outputs are all unaware of DPMS modes other than off
3176
     * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3177
     */
3178
    switch (mode) {
3179
    case DRM_MODE_DPMS_ON:
3180
    case DRM_MODE_DPMS_STANDBY:
3181
    case DRM_MODE_DPMS_SUSPEND:
3182
        i9xx_crtc_enable(crtc);
3183
        break;
3184
    case DRM_MODE_DPMS_OFF:
3185
        i9xx_crtc_disable(crtc);
3186
        break;
3187
    }
3188
}
3189
 
2330 Serge 3190
/**
 * Sets the power management mode of the pipe and plane.
 *
 * Dispatches to the platform-specific dpms hook and then mirrors the new
 * pipe dimensions into the legacy SAREA so DRI1 clients stay in sync.
 */
static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool enabled;

	/* No-op when the crtc is already in the requested mode. */
	if (intel_crtc->dpms_mode == mode)
		return;

	intel_crtc->dpms_mode = mode;

	dev_priv->display.dpms(crtc, mode);

	/* The rest only applies when a legacy master/SAREA exists. */
	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (!master_priv->sarea_priv)
		return;

	enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;

	/* Publish the active mode size (or 0x0 when off) for this pipe. */
	switch (pipe) {
	case 0:
		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
		master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
		break;
	case 1:
		master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
		master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
		break;
	default:
		/* SAREA only has slots for pipes A and B. */
		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
		break;
	}
}
2327 Serge 3232
 
2330 Serge 3233
/* Fully shut down a crtc and release the pin on its scan-out buffer
 * (the unpin is stubbed out in this port). */
static void intel_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	struct drm_device *dev = crtc->dev;

	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);

	if (crtc->fb) {
		mutex_lock(&dev->struct_mutex);
//		i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
		mutex_unlock(&dev->struct_mutex);
	}
}
2327 Serge 3246
 
2330 Serge 3247
/* Prepare for a mode set.
 *
 * Note we could be a lot smarter here.  We need to figure out which outputs
 * will be enabled, which disabled (in short, how the config will changes)
 * and perform the minimum necessary steps to accomplish that, e.g. updating
 * watermarks, FBC configuration, making sure PLLs are programmed correctly,
 * panel fitting is in the proper state, etc.
 */
static void i9xx_crtc_prepare(struct drm_crtc *crtc)
{
	/* Simplest correct approach: turn the crtc fully off first. */
	i9xx_crtc_disable(crtc);
}
2327 Serge 3259
 
2330 Serge 3260
/* Commit after a mode set: bring the crtc back up with the new config. */
static void i9xx_crtc_commit(struct drm_crtc *crtc)
{
	i9xx_crtc_enable(crtc);
}
2327 Serge 3264
 
2330 Serge 3265
/* Prepare for a mode set on ILK+: turn the crtc fully off first. */
static void ironlake_crtc_prepare(struct drm_crtc *crtc)
{
	ironlake_crtc_disable(crtc);
}
2327 Serge 3269
 
2330 Serge 3270
/* Commit after a mode set on ILK+: bring the crtc back up. */
static void ironlake_crtc_commit(struct drm_crtc *crtc)
{
	ironlake_crtc_enable(crtc);
}
2327 Serge 3274
 
2330 Serge 3275
/* Generic encoder ->prepare helper: power the encoder down before a
 * mode set via its own dpms hook. */
void intel_encoder_prepare (struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	/* lvds has its own version of prepare see intel_lvds_prepare */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
2327 Serge 3281
 
2330 Serge 3282
/* Generic encoder ->commit helper: power the encoder back on after a
 * mode set via its own dpms hook. */
void intel_encoder_commit (struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	/* lvds has its own version of commit see intel_lvds_commit */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}
2327 Serge 3288
 
2330 Serge 3289
/* Tear down an encoder and free its containing intel_encoder, which owns
 * the embedded drm_encoder. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
3296
 
3297
static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3298
				  struct drm_display_mode *mode,
3299
				  struct drm_display_mode *adjusted_mode)
3300
{
3301
	struct drm_device *dev = crtc->dev;
3302
 
3303
	if (HAS_PCH_SPLIT(dev)) {
3304
		/* FDI link clock is fixed at 2.7G */
3305
		if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
3306
			return false;
3307
	}
3308
 
3309
	/* XXX some encoders set the crtcinfo, others don't.
3310
	 * Obviously we need some form of conflict resolution here...
3311
	 */
3312
	if (adjusted_mode->crtc_htotal == 0)
3313
		drm_mode_set_crtcinfo(adjusted_mode, 0);
3314
 
3315
	return true;
3316
}
3317
 
2327 Serge 3318
/* Per-platform core display clock speed, in kHz.  These feed the watermark
 * and self-refresh calculations.  Most parts have a fixed clock; i915GM
 * derives it from the GCFGC config register. */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}

static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333000;
}

static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}

static int i915gm_get_display_clock_speed(struct drm_device *dev)
{
	u16 gcfgc = 0;

	/* GCFGC lives in PCI config space on this platform. */
	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
		return 133000;
	else {
		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
		case GC_DISPLAY_CLOCK_333_MHZ:
			return 333000;
		default:
		case GC_DISPLAY_CLOCK_190_200_MHZ:
			return 190000;
		}
	}
}

static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266000;
}

static int i855_get_display_clock_speed(struct drm_device *dev)
{
	/* NOTE(review): hpllcc is never read from the HPLLCC register here,
	 * so the switch always evaluates the hard-coded 0 — confirm whether
	 * a config-space read was intended, as upstream later added one. */
	u16 hpllcc = 0;
	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133000;
	}

	/* Shouldn't happen */
	return 0;
}

static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133000;
}
3381
 
3382
/* FDI link M/N divider values computed by ironlake_compute_m_n() and
 * programmed into the data/link M/N registers to relate the pixel clock
 * to the FDI link clock. */
struct fdi_m_n {
    u32        tu;      /* transfer unit size */
    u32        gmch_m;  /* data M */
    u32        gmch_n;  /* data N */
    u32        link_m;  /* link M */
    u32        link_n;  /* link N */
};
3389
 
3390
/* Halve both terms of the num/den ratio until each fits the 24-bit M/N
 * register fields; the ratio is preserved to within rounding. */
static void
fdi_reduce_ratio(u32 *num, u32 *den)
{
	while (*num > 0xffffff || *den > 0xffffff) {
		*num >>= 1;
		*den >>= 1;
	}
}
3398
 
3399
/* Compute the FDI data and link M/N values for the given pixel format,
 * lane count, pixel clock and link clock (all clocks in kHz). */
static void
ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
		     int link_clock, struct fdi_m_n *m_n)
{
	m_n->tu = 64; /* default size */

	/* BUG_ON(pixel_clock > INT_MAX / 36); */
	/* Data M/N: payload bits per pixel clock vs. link capacity
	 * (link clock * lanes * 8 bits per symbol). */
	m_n->gmch_m = bits_per_pixel * pixel_clock;
	m_n->gmch_n = link_clock * nlanes * 8;
	fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);

	/* Link M/N: raw pixel clock vs. link clock ratio. */
	m_n->link_m = pixel_clock;
	m_n->link_n = link_clock;
	fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
3414
 
3415
 
3416
struct intel_watermark_params {
3417
    unsigned long fifo_size;
3418
    unsigned long max_wm;
3419
    unsigned long default_wm;
3420
    unsigned long guard_size;
3421
    unsigned long cacheline_size;
3422
};
3423
 
3424
/* Pineview has different values for various configs */
3425
static const struct intel_watermark_params pineview_display_wm = {
3426
    PINEVIEW_DISPLAY_FIFO,
3427
    PINEVIEW_MAX_WM,
3428
    PINEVIEW_DFT_WM,
3429
    PINEVIEW_GUARD_WM,
3430
    PINEVIEW_FIFO_LINE_SIZE
3431
};
3432
static const struct intel_watermark_params pineview_display_hplloff_wm = {
3433
    PINEVIEW_DISPLAY_FIFO,
3434
    PINEVIEW_MAX_WM,
3435
    PINEVIEW_DFT_HPLLOFF_WM,
3436
    PINEVIEW_GUARD_WM,
3437
    PINEVIEW_FIFO_LINE_SIZE
3438
};
3439
static const struct intel_watermark_params pineview_cursor_wm = {
3440
    PINEVIEW_CURSOR_FIFO,
3441
    PINEVIEW_CURSOR_MAX_WM,
3442
    PINEVIEW_CURSOR_DFT_WM,
3443
    PINEVIEW_CURSOR_GUARD_WM,
3444
    PINEVIEW_FIFO_LINE_SIZE,
3445
};
3446
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
3447
    PINEVIEW_CURSOR_FIFO,
3448
    PINEVIEW_CURSOR_MAX_WM,
3449
    PINEVIEW_CURSOR_DFT_WM,
3450
    PINEVIEW_CURSOR_GUARD_WM,
3451
    PINEVIEW_FIFO_LINE_SIZE
3452
};
3453
static const struct intel_watermark_params g4x_wm_info = {
3454
    G4X_FIFO_SIZE,
3455
    G4X_MAX_WM,
3456
    G4X_MAX_WM,
3457
    2,
3458
    G4X_FIFO_LINE_SIZE,
3459
};
3460
static const struct intel_watermark_params g4x_cursor_wm_info = {
3461
    I965_CURSOR_FIFO,
3462
    I965_CURSOR_MAX_WM,
3463
    I965_CURSOR_DFT_WM,
3464
    2,
3465
    G4X_FIFO_LINE_SIZE,
3466
};
3467
static const struct intel_watermark_params i965_cursor_wm_info = {
3468
    I965_CURSOR_FIFO,
3469
    I965_CURSOR_MAX_WM,
3470
    I965_CURSOR_DFT_WM,
3471
    2,
3472
    I915_FIFO_LINE_SIZE,
3473
};
3474
static const struct intel_watermark_params i945_wm_info = {
3475
    I945_FIFO_SIZE,
3476
    I915_MAX_WM,
3477
    1,
3478
    2,
3479
    I915_FIFO_LINE_SIZE
3480
};
3481
static const struct intel_watermark_params i915_wm_info = {
3482
    I915_FIFO_SIZE,
3483
    I915_MAX_WM,
3484
    1,
3485
    2,
3486
    I915_FIFO_LINE_SIZE
3487
};
3488
static const struct intel_watermark_params i855_wm_info = {
3489
    I855GM_FIFO_SIZE,
3490
    I915_MAX_WM,
3491
    1,
3492
    2,
3493
    I830_FIFO_LINE_SIZE
3494
};
3495
static const struct intel_watermark_params i830_wm_info = {
3496
    I830_FIFO_SIZE,
3497
    I915_MAX_WM,
3498
    1,
3499
    2,
3500
    I830_FIFO_LINE_SIZE
3501
};
3502
 
3503
static const struct intel_watermark_params ironlake_display_wm_info = {
3504
    ILK_DISPLAY_FIFO,
3505
    ILK_DISPLAY_MAXWM,
3506
    ILK_DISPLAY_DFTWM,
3507
    2,
3508
    ILK_FIFO_LINE_SIZE
3509
};
3510
static const struct intel_watermark_params ironlake_cursor_wm_info = {
3511
    ILK_CURSOR_FIFO,
3512
    ILK_CURSOR_MAXWM,
3513
    ILK_CURSOR_DFTWM,
3514
    2,
3515
    ILK_FIFO_LINE_SIZE
3516
};
3517
static const struct intel_watermark_params ironlake_display_srwm_info = {
3518
    ILK_DISPLAY_SR_FIFO,
3519
    ILK_DISPLAY_MAX_SRWM,
3520
    ILK_DISPLAY_DFT_SRWM,
3521
    2,
3522
    ILK_FIFO_LINE_SIZE
3523
};
3524
static const struct intel_watermark_params ironlake_cursor_srwm_info = {
3525
    ILK_CURSOR_SR_FIFO,
3526
    ILK_CURSOR_MAX_SRWM,
3527
    ILK_CURSOR_DFT_SRWM,
3528
    2,
3529
    ILK_FIFO_LINE_SIZE
3530
};
3531
 
3532
static const struct intel_watermark_params sandybridge_display_wm_info = {
3533
    SNB_DISPLAY_FIFO,
3534
    SNB_DISPLAY_MAXWM,
3535
    SNB_DISPLAY_DFTWM,
3536
    2,
3537
    SNB_FIFO_LINE_SIZE
3538
};
3539
static const struct intel_watermark_params sandybridge_cursor_wm_info = {
3540
    SNB_CURSOR_FIFO,
3541
    SNB_CURSOR_MAXWM,
3542
    SNB_CURSOR_DFTWM,
3543
    2,
3544
    SNB_FIFO_LINE_SIZE
3545
};
3546
static const struct intel_watermark_params sandybridge_display_srwm_info = {
3547
    SNB_DISPLAY_SR_FIFO,
3548
    SNB_DISPLAY_MAX_SRWM,
3549
    SNB_DISPLAY_DFT_SRWM,
3550
    2,
3551
    SNB_FIFO_LINE_SIZE
3552
};
3553
static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
3554
    SNB_CURSOR_SR_FIFO,
3555
    SNB_CURSOR_MAX_SRWM,
3556
    SNB_CURSOR_DFT_SRWM,
3557
    2,
3558
    SNB_FIFO_LINE_SIZE
3559
};
3560
 
3561
 
3562
/**
3563
 * intel_calculate_wm - calculate watermark level
3564
 * @clock_in_khz: pixel clock
3565
 * @wm: chip FIFO params
3566
 * @pixel_size: display pixel size
3567
 * @latency_ns: memory latency for the platform
3568
 *
3569
 * Calculate the watermark level (the level at which the display plane will
3570
 * start fetching from memory again).  Each chip has a different display
3571
 * FIFO size and allocation, so the caller needs to figure that out and pass
3572
 * in the correct intel_watermark_params structure.
3573
 *
3574
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
3575
 * on the pixel size.  When it reaches the watermark level, it'll start
3576
 * fetching FIFO line sized based chunks from memory until the FIFO fills
3577
 * past the watermark point.  If the FIFO drains completely, a FIFO underrun
3578
 * will occur, and a display engine hang could result.
3579
 */
3580
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
3581
                    const struct intel_watermark_params *wm,
3582
                    int fifo_size,
3583
                    int pixel_size,
3584
                    unsigned long latency_ns)
3585
{
3586
    long entries_required, wm_size;
3587
 
3588
    /*
3589
     * Note: we need to make sure we don't overflow for various clock &
3590
     * latency values.
3591
     * clocks go from a few thousand to several hundred thousand.
3592
     * latency is usually a few thousand
3593
     */
3594
    entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
3595
        1000;
3596
    entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
3597
 
3598
    DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
3599
 
3600
    wm_size = fifo_size - (entries_required + wm->guard_size);
3601
 
3602
    DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
3603
 
3604
    /* Don't promote wm_size to unsigned... */
3605
    if (wm_size > (long)wm->max_wm)
3606
        wm_size = wm->max_wm;
3607
    if (wm_size <= 0)
3608
        wm_size = wm->default_wm;
3609
    return wm_size;
3610
}
3611
 
3612
/* One row of the Pineview CxSR (self-refresh) latency table, keyed by
 * platform flavour and FSB/memory frequency pair. */
struct cxsr_latency {
    int is_desktop;                      /* 1 = desktop, 0 = mobile part */
    int is_ddr3;                         /* 1 = DDR3, 0 = DDR2 */
    unsigned long fsb_freq;              /* FSB frequency, MHz */
    unsigned long mem_freq;              /* memory frequency, MHz */
    unsigned long display_sr;            /* display self-refresh latency */
    unsigned long display_hpll_disable;  /* display latency with HPLL off */
    unsigned long cursor_sr;             /* cursor self-refresh latency */
    unsigned long cursor_hpll_disable;   /* cursor latency with HPLL off */
};
3622
 
3623
/* Pineview CxSR latency table, looked up by intel_get_cxsr_latency().
 * Grouped by FSB frequency (800/667/400 MHz), desktop rows first. */
static const struct cxsr_latency cxsr_latency_table[] = {
    {1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
    {1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
    {1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
    {1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
    {1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

    {1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
    {1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
    {1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
    {1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
    {1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

    {1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
    {1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
    {1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
    {1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
    {1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

    {0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
    {0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
    {0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
    {0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
    {0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

    {0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
    {0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
    {0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
    {0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
    {0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

    {0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
    {0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
    {0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
    {0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
    {0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
3660
 
3661
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
3662
                             int is_ddr3,
3663
                             int fsb,
3664
                             int mem)
3665
{
3666
    const struct cxsr_latency *latency;
3667
    int i;
3668
 
3669
    if (fsb == 0 || mem == 0)
3670
        return NULL;
3671
 
3672
    for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
3673
        latency = &cxsr_latency_table[i];
3674
        if (is_desktop == latency->is_desktop &&
3675
            is_ddr3 == latency->is_ddr3 &&
3676
            fsb == latency->fsb_freq && mem == latency->mem_freq)
3677
            return latency;
3678
    }
3679
 
3680
    DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
3681
 
3682
    return NULL;
3683
}
3684
 
3685
/* Turn off Pineview self-refresh (CxSR) by clearing its enable bit. */
static void pineview_disable_cxsr(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    /* deactivate cxsr */
    I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}
3692
 
3693
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000; /* assumed memory latency, nanoseconds */
3708
 
3709
/* Per-platform helpers returning the display FIFO size available to a
 * plane, decoded from the DSPARB split register.  DSPARB marks where
 * plane A's allocation ends and plane B's begins; units differ per
 * generation (already cachelines on i9xx, converted below elsewhere). */
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	/* Low 7 bits give plane A's share; plane B gets the rest up to
	 * the C start boundary. */
	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i85x_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	/* i845 has a single plane; both callers get the same allocation. */
	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A",
		      size);

	return size;
}

static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
3772
 
3773
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
3774
{
3775
    struct drm_crtc *crtc, *enabled = NULL;
3776
 
3777
    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3778
        if (crtc->enabled && crtc->fb) {
3779
            if (enabled)
3780
                return NULL;
3781
            enabled = crtc;
3782
        }
3783
    }
3784
 
3785
    return enabled;
3786
}
3787
 
3788
/*
 * Program the Pineview self-refresh (CxSR) watermarks.
 *
 * Looks up memory-latency data for the current FSB/memory configuration;
 * if none is known, or if zero/multiple CRTCs are active, self-refresh
 * is disabled.  Otherwise the display SR, cursor SR and the two
 * "HPLL off" watermarks are computed and written into DSPFW1/DSPFW3,
 * and finally CxSR is enabled.
 */
static void pineview_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		/* No latency table entry: CxSR cannot be programmed safely. */
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}

	/* Self-refresh is only valid with exactly one active display. */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		int clock = crtc->mode.clock;
		int pixel_size = crtc->fb->bits_per_pixel / 8;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		/* NOTE(review): the cursor calculations below pass the
		 * *display* params' fifo_size with the cursor wm params —
		 * this mirrors the upstream driver, but verify it is
		 * intentional rather than a copy-paste slip. */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}
3856
 
3857
/*
 * Compute the WM0 (normal operation) plane and cursor watermarks for the
 * CRTC driving @plane.  Shared by the G4x, Ironlake and Sandybridge
 * update paths.
 *
 * Returns false — with both watermarks parked at their guard sizes —
 * when the pipe is disabled or has no framebuffer; true otherwise, with
 * results stored through @plane_wm / @cursor_wm.
 */
static bool g4x_compute_wm0(struct drm_device *dev,
                int plane,
                const struct intel_watermark_params *display,
                int display_latency_ns,
                const struct intel_watermark_params *cursor,
                int cursor_latency_ns,
                int *plane_wm,
                int *cursor_wm)
{
    struct drm_crtc *crtc;
    int htotal, hdisplay, clock, pixel_size;
    int line_time_us, line_count;
    int entries, tlb_miss;

    crtc = intel_get_crtc_for_plane(dev, plane);
    if (crtc->fb == NULL || !crtc->enabled) {
        /* Inactive pipe: report safe minimum (guard) values. */
        *cursor_wm = cursor->guard_size;
        *plane_wm = display->guard_size;
        return false;
    }

    htotal = crtc->mode.htotal;
    hdisplay = crtc->mode.hdisplay;
    clock = crtc->mode.clock;   /* assumed non-zero for an enabled CRTC */
    pixel_size = crtc->fb->bits_per_pixel / 8;

    /* Use the small buffer method to calculate plane watermark */
    entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
    /* Account for a potential TLB miss when the FIFO can hold less
     * than the fetch granularity of the scanline. */
    tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
    if (tlb_miss > 0)
        entries += tlb_miss;
    entries = DIV_ROUND_UP(entries, display->cacheline_size);
    *plane_wm = entries + display->guard_size;
    if (*plane_wm > (int)display->max_wm)
        *plane_wm = display->max_wm;    /* clamp to register field */

    /* Use the large buffer method to calculate cursor watermark */
    line_time_us = ((htotal * 1000) / clock);
    /* Latency rounded up to whole scanlines; cursor is 64 px wide. */
    line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
    entries = line_count * 64 * pixel_size;
    tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
    if (tlb_miss > 0)
        entries += tlb_miss;
    entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
    *cursor_wm = entries + cursor->guard_size;
    if (*cursor_wm > (int)cursor->max_wm)
        *cursor_wm = (int)cursor->max_wm;

    return true;
}
3907
 
3908
/*
3909
 * Check the wm result.
3910
 *
3911
 * If any calculated watermark values is larger than the maximum value that
3912
 * can be programmed into the associated watermark register, that watermark
3913
 * must be disabled.
3914
 */
3915
static bool g4x_check_srwm(struct drm_device *dev,
3916
			   int display_wm, int cursor_wm,
3917
			   const struct intel_watermark_params *display,
3918
			   const struct intel_watermark_params *cursor)
3919
{
3920
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
3921
		      display_wm, cursor_wm);
3922
 
3923
	if (display_wm > display->max_wm) {
3924
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
3925
			      display_wm, display->max_wm);
3926
		return false;
3927
	}
3928
 
3929
	if (cursor_wm > cursor->max_wm) {
3930
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
3931
			      cursor_wm, cursor->max_wm);
3932
		return false;
3933
	}
3934
 
3935
	if (!(display_wm || cursor_wm)) {
3936
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
3937
		return false;
3938
	}
3939
 
3940
	return true;
3941
}
3942
 
3943
/*
 * Compute the G4x self-refresh watermarks for the CRTC driving @plane.
 *
 * Returns false (zeroing both results) when @latency_ns is zero, i.e.
 * no latency data is available.  Otherwise stores the display and
 * cursor SR watermarks and returns g4x_check_srwm()'s verdict on
 * whether they fit their register fields.
 */
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;	/* assumed non-zero for an active CRTC */
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	/* Round the latency up to whole scanlines. */
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	/* Cursor is 64 pixels wide. */
	entries = line_count * pixel_size * 64;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
3988
 
3989
/* Exactly one plane bit set in @mask (zero planes yields false). */
#define single_plane_enabled(mask) is_power_of_2(mask)
3990
 
3991
/*
 * Recompute and program all G4x watermark registers (DSPFW1-3).
 *
 * WM0 is computed per plane; the self-refresh watermarks are only
 * computed — and FW_BLC_SELF only enabled — when exactly one plane is
 * active.
 */
static void g4x_update_wm(struct drm_device *dev)
{
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;	/* bitmask of active planes (bit 0 = A) */

	if (g4x_compute_wm0(dev, 0,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	/* Self-refresh only when a single plane is enabled. */
	plane_sr = cursor_sr = 0;
	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	else
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
4041
 
4042
/*
 * Recompute and program the i965 watermarks.
 *
 * Normal (non-SR) watermarks are fixed at 8; only the self-refresh
 * display and cursor watermarks are computed, and only when a single
 * CRTC is active.  On Crestline the FW_BLC_SELF enable bit is toggled
 * accordingly.
 */
static void i965_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;		/* fallback if SR cannot be computed */
	int cursor_sr = 16;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		int clock = crtc->mode.clock;
		int htotal = crtc->mode.htotal;
		int hdisplay = crtc->mode.hdisplay;
		int pixel_size = crtc->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = ((htotal * 1000) / clock);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;	/* clamp to the 9-bit register field */
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		/* Cursor is 64 pixels wide. */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * 64;
		entries = DIV_ROUND_UP(entries,
					  i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
4106
 
4107
/*
 * Recompute and program the gen2/gen3 (i8xx/i9xx) FIFO watermarks.
 *
 * Computes per-plane WM values using the platform's wm_info table,
 * disables memory self-refresh while reprogramming, then — when exactly
 * one plane is active and the hardware has FW_BLC — computes the SR
 * watermark and re-enables self-refresh.
 */
static void i9xx_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	/* 'enabled' ends up non-NULL only when exactly one plane is active. */
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i855_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (crtc->enabled && crtc->fb) {
		planea_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		enabled = crtc;
	} else
		planea_wm = fifo_size - wm_info->guard_size;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (crtc->enabled && crtc->fb) {
		planeb_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;	/* both planes active: no SR */
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	if (IS_I945G(dev) || IS_I945GM(dev))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
	else if (IS_I915GM(dev))
		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		int clock = enabled->mode.clock;
		int htotal = enabled->mode.htotal;
		int hdisplay = enabled->mode.hdisplay;
		int pixel_size = enabled->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = (htotal * 1000) / clock;

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	/* Re-enable self-refresh now that the watermarks are in place. */
	if (HAS_FW_BLC(dev)) {
		if (enabled) {
			if (IS_I945G(dev) || IS_I945GM(dev))
				I915_WRITE(FW_BLC_SELF,
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
			else if (IS_I915GM(dev))
				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
			DRM_DEBUG_KMS("memory self refresh enabled\n");
		} else
			DRM_DEBUG_KMS("memory self refresh disabled\n");
	}
}
4217
 
4218
static void i830_update_wm(struct drm_device *dev)
4219
{
4220
	struct drm_i915_private *dev_priv = dev->dev_private;
4221
	struct drm_crtc *crtc;
4222
	uint32_t fwater_lo;
4223
	int planea_wm;
4224
 
4225
	crtc = single_enabled_crtc(dev);
4226
	if (crtc == NULL)
4227
		return;
4228
 
4229
	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
4230
				       dev_priv->display.get_fifo_size(dev, 0),
4231
				       crtc->fb->bits_per_pixel / 8,
4232
				       latency_ns);
4233
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
4234
	fwater_lo |= (3<<8) | planea_wm;
4235
 
4236
	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
4237
 
4238
	I915_WRITE(FW_BLC, fwater_lo);
4239
}
4240
 
4241
/* Ironlake LP0 latencies passed to g4x_compute_wm0() as the plane and
 * cursor latency arguments (nanoseconds). */
#define ILK_LP0_PLANE_LATENCY		700
#define ILK_LP0_CURSOR_LATENCY		1300
4243
 
4244
/*
 * Check the wm result.
 *
 * If any calculated watermark values is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool ironlake_check_srwm(struct drm_device *dev, int level,
				int fbc_wm, int display_wm, int cursor_wm,
				const struct intel_watermark_params *display,
				const struct intel_watermark_params *cursor)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);

	if (fbc_wm > SNB_FBC_MAX_SRWM) {
		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
			      fbc_wm, SNB_FBC_MAX_SRWM, level);

		/* fbc has it's own way to disable FBC WM */
		I915_WRITE(DISP_ARB_CTL,
			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
		return false;
	}

	/* NOTE(review): the two messages below print the SNB_* limits while
	 * the comparisons use the per-platform display/cursor->max_wm —
	 * the logged limit may differ from the one actually checked. */
	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
			      display_wm, SNB_DISPLAY_MAX_SRWM, level);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
			      cursor_wm, SNB_CURSOR_MAX_SRWM, level);
		return false;
	}

	/* All-zero watermarks mean no usable latency data for this level. */
	if (!(fbc_wm || display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
		return false;
	}

	return true;
}
4290
 
4291
/*
 * Compute watermark values of WM[1-3],
 * i.e. the self-refresh display, FBC and cursor watermarks for the
 * given latency level on Ironlake/Sandybridge.  Returns false (zeroing
 * all three outputs) when no latency data is available, otherwise the
 * verdict of ironlake_check_srwm().
 */
static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
                  int latency_ns,
                  const struct intel_watermark_params *display,
                  const struct intel_watermark_params *cursor,
                  int *fbc_wm, int *display_wm, int *cursor_wm)
{
    struct drm_crtc *crtc;
    unsigned long line_time_us;
    int hdisplay, htotal, pixel_size, clock;
    int line_count, line_size;
    int small, large;
    int entries;

    if (!latency_ns) {
        *fbc_wm = *display_wm = *cursor_wm = 0;
        return false;
    }

    crtc = intel_get_crtc_for_plane(dev, plane);
    hdisplay = crtc->mode.hdisplay;
    htotal = crtc->mode.htotal;
    clock = crtc->mode.clock;   /* assumed non-zero for an active CRTC */
    pixel_size = crtc->fb->bits_per_pixel / 8;

    line_time_us = (htotal * 1000) / clock;
    /* Round the latency up to whole scanlines. */
    line_count = (latency_ns / line_time_us + 1000) / 1000;
    line_size = hdisplay * pixel_size;

    /* Use the minimum of the small and large buffer method for primary */
    small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
    large = line_count * line_size;

    entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
    *display_wm = entries + display->guard_size;

    /*
     * Spec says:
     * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
     */
    *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;

    /* calculate the self-refresh watermark for display cursor */
    /* Cursor is 64 pixels wide. */
    entries = line_count * pixel_size * 64;
    entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
    *cursor_wm = entries + cursor->guard_size;

    return ironlake_check_srwm(dev, level,
                   *fbc_wm, *display_wm, *cursor_wm,
                   display, cursor);
}
4344
 
4345
/*
 * Recompute and program the Ironlake watermarks.
 *
 * WM0 is programmed per pipe; WM1/WM2 (self-refresh levels) are only
 * programmed when a single plane is active, and each level is skipped
 * — leaving the register zeroed — as soon as a lower level fails its
 * range check.  WM3 is never programmed on ILK.
 */
static void ironlake_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;	/* bitmask of active pipes (bit 0 = A) */

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEA_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEB_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 */
	/* Clear all SR levels first; they stay zero on any early return. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled))
		return;
	enabled = ffs(enabled) - 1;	/* bitmask -> pipe index */

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   ILK_READ_WM1_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   ILK_READ_WM2_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/*
	 * WM3 is unsupported on ILK, probably because we don't have latency
	 * data for that power state
	 */
}
4427
 
4428
static void sandybridge_update_wm(struct drm_device *dev)
4429
{
4430
	struct drm_i915_private *dev_priv = dev->dev_private;
4431
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
4432
	int fbc_wm, plane_wm, cursor_wm;
4433
	unsigned int enabled;
4434
 
2336 Serge 4435
    ENTER();
4436
 
2327 Serge 4437
	enabled = 0;
4438
	if (g4x_compute_wm0(dev, 0,
4439
			    &sandybridge_display_wm_info, latency,
4440
			    &sandybridge_cursor_wm_info, latency,
4441
			    &plane_wm, &cursor_wm)) {
4442
		I915_WRITE(WM0_PIPEA_ILK,
4443
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4444
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
4445
			      " plane %d, " "cursor: %d\n",
4446
			      plane_wm, cursor_wm);
4447
		enabled |= 1;
4448
	}
4449
 
4450
	if (g4x_compute_wm0(dev, 1,
4451
			    &sandybridge_display_wm_info, latency,
4452
			    &sandybridge_cursor_wm_info, latency,
4453
			    &plane_wm, &cursor_wm)) {
4454
		I915_WRITE(WM0_PIPEB_ILK,
4455
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4456
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
4457
			      " plane %d, cursor: %d\n",
4458
			      plane_wm, cursor_wm);
4459
		enabled |= 2;
4460
	}
4461
 
4462
	/*
4463
	 * Calculate and update the self-refresh watermark only when one
4464
	 * display plane is used.
4465
	 *
4466
	 * SNB support 3 levels of watermark.
4467
	 *
4468
	 * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
4469
	 * and disabled in the descending order
4470
	 *
4471
	 */
4472
	I915_WRITE(WM3_LP_ILK, 0);
4473
	I915_WRITE(WM2_LP_ILK, 0);
4474
	I915_WRITE(WM1_LP_ILK, 0);
4475
 
4476
	if (!single_plane_enabled(enabled))
2336 Serge 4477
    {
4478
        LEAVE();
2327 Serge 4479
		return;
2336 Serge 4480
    };
4481
 
2327 Serge 4482
	enabled = ffs(enabled) - 1;
4483
 
2336 Serge 4484
    dbgprintf("compute wm1\n");
4485
 
2327 Serge 4486
	/* WM1 */
4487
	if (!ironlake_compute_srwm(dev, 1, enabled,
4488
				   SNB_READ_WM1_LATENCY() * 500,
4489
				   &sandybridge_display_srwm_info,
4490
				   &sandybridge_cursor_srwm_info,
4491
				   &fbc_wm, &plane_wm, &cursor_wm))
4492
		return;
4493
 
4494
	I915_WRITE(WM1_LP_ILK,
4495
		   WM1_LP_SR_EN |
4496
		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4497
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
4498
		   (plane_wm << WM1_LP_SR_SHIFT) |
4499
		   cursor_wm);
4500
 
2336 Serge 4501
    dbgprintf("compute wm2\n");
4502
 
2327 Serge 4503
	/* WM2 */
4504
	if (!ironlake_compute_srwm(dev, 2, enabled,
4505
				   SNB_READ_WM2_LATENCY() * 500,
4506
				   &sandybridge_display_srwm_info,
4507
				   &sandybridge_cursor_srwm_info,
4508
				   &fbc_wm, &plane_wm, &cursor_wm))
4509
		return;
4510
 
4511
	I915_WRITE(WM2_LP_ILK,
4512
		   WM2_LP_EN |
4513
		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4514
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
4515
		   (plane_wm << WM1_LP_SR_SHIFT) |
4516
		   cursor_wm);
4517
 
2336 Serge 4518
    dbgprintf("compute wm3\n");
4519
 
2327 Serge 4520
	/* WM3 */
4521
	if (!ironlake_compute_srwm(dev, 3, enabled,
4522
				   SNB_READ_WM3_LATENCY() * 500,
4523
				   &sandybridge_display_srwm_info,
4524
				   &sandybridge_cursor_srwm_info,
4525
				   &fbc_wm, &plane_wm, &cursor_wm))
4526
		return;
4527
 
4528
	I915_WRITE(WM3_LP_ILK,
4529
		   WM3_LP_EN |
4530
		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4531
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
4532
		   (plane_wm << WM1_LP_SR_SHIFT) |
4533
		   cursor_wm);
2336 Serge 4534
 
4535
    LEAVE();
4536
 
2327 Serge 4537
}
4538
 
4539
/**
4540
 * intel_update_watermarks - update FIFO watermark values based on current modes
4541
 *
4542
 * Calculate watermark values for the various WM regs based on current mode
4543
 * and plane configuration.
4544
 *
4545
 * There are several cases to deal with here:
4546
 *   - normal (i.e. non-self-refresh)
4547
 *   - self-refresh (SR) mode
4548
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
4549
 *   - lines are small relative to FIFO size (buffer can hold more than 2
4550
 *     lines), so need to account for TLB latency
4551
 *
4552
 *   The normal calculation is:
4553
 *     watermark = dotclock * bytes per pixel * latency
4554
 *   where latency is platform & configuration dependent (we assume pessimal
4555
 *   values here).
4556
 *
4557
 *   The SR calculation is:
4558
 *     watermark = (trunc(latency/line time)+1) * surface width *
4559
 *       bytes per pixel
4560
 *   where
4561
 *     line time = htotal / dotclock
4562
 *     surface width = hdisplay for normal plane and 64 for cursor
4563
 *   and latency is assumed to be high, as above.
4564
 *
4565
 * The final value programmed to the register should always be rounded up,
4566
 * and include an extra 2 entries to account for clock crossings.
4567
 *
4568
 * We don't use the sprite, so we can ignore that.  And on Crestline we have
4569
 * to set the non-SR watermarks to 8.
4570
 */
4571
/* Dispatch to the platform-specific watermark routine, if one is set. */
static void intel_update_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
    ENTER();
	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(dev);
    LEAVE();
}
4579
 
4580
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4581
{
4582
	return dev_priv->lvds_use_ssc && i915_panel_use_ssc
4583
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4584
}
4585
 
4586
/**
4587
 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
4588
 * @crtc: CRTC structure
4589
 *
4590
 * A pipe may be connected to one or more outputs.  Based on the depth of the
4591
 * attached framebuffer, choose a good color depth to use on the pipe.
4592
 *
4593
 * If possible, match the pipe depth to the fb depth.  In some cases, this
4594
 * isn't ideal, because the connected output supports a lesser or restricted
4595
 * set of depths.  Resolve that here:
4596
 *    LVDS typically supports only 6bpc, so clamp down in that case
4597
 *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
4598
 *    Displays may support a restricted set as well, check EDID and clamp as
4599
 *      appropriate.
4600
 *
4601
 * RETURNS:
4602
 * Dithering requirement (i.e. false if display bpc and pipe bpc match,
4603
 * true if they don't match).
4604
 */
4605
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
					 unsigned int *pipe_bpp)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	/* Minimum bpc supported across all sinks on this CRTC. */
	unsigned int display_bpc = UINT_MAX, bpc;

	/* Walk the encoders & connectors on this crtc, get min bpc */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		if (encoder->crtc != crtc)
			continue;

		if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
			unsigned int lvds_bpc;

			/* LVDS bpc depends on the panel's power-state wiring. */
			if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
			    LVDS_A3_POWER_UP)
				lvds_bpc = 8;
			else
				lvds_bpc = 6;

			if (lvds_bpc < display_bpc) {
				DRM_DEBUG_DRIVER("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
				display_bpc = lvds_bpc;
			}
			continue;
		}

		if (intel_encoder->type == INTEL_OUTPUT_EDP) {
			/* Use VBT settings if we have an eDP panel */
			unsigned int edp_bpc = dev_priv->edp.bpp / 3;

			if (edp_bpc < display_bpc) {
				DRM_DEBUG_DRIVER("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
				display_bpc = edp_bpc;
			}
			continue;
		}

		/* Not one of the known troublemakers, check the EDID */
		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    head) {
			if (connector->encoder != encoder)
				continue;

			/* Don't use an invalid EDID bpc value */
			if (connector->display_info.bpc &&
			    connector->display_info.bpc < display_bpc) {
				DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
				display_bpc = connector->display_info.bpc;
			}
		}

		/*
		 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
		 * through, clamp it down.  (Note: >12bpc will be caught below.)
		 */
		if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
			if (display_bpc > 8 && display_bpc < 12) {
				DRM_DEBUG_DRIVER("forcing bpc to 12 for HDMI\n");
				display_bpc = 12;
			} else {
				DRM_DEBUG_DRIVER("forcing bpc to 8 for HDMI\n");
				display_bpc = 8;
			}
		}
	}

	/*
	 * We could just drive the pipe at the highest bpc all the time and
	 * enable dithering as needed, but that costs bandwidth.  So choose
	 * the minimum value that expresses the full color range of the fb but
	 * also stays within the max display bpc discovered above.
	 */

	switch (crtc->fb->depth) {
	case 8:
		bpc = 8; /* since we go through a colormap */
		break;
	case 15:
	case 16:
		bpc = 6; /* min is 18bpp */
		break;
	case 24:
		bpc = min((unsigned int)8, display_bpc);
		break;
	case 30:
		bpc = min((unsigned int)10, display_bpc);
		break;
	case 48:
		bpc = min((unsigned int)12, display_bpc);
		break;
	default:
		DRM_DEBUG("unsupported depth, assuming 24 bits\n");
		bpc = min((unsigned int)8, display_bpc);
		break;
	}

	DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n",
			 bpc, display_bpc);

	*pipe_bpp = bpc * 3;

	/* Dithering is needed whenever the chosen pipe bpc differs from
	 * the sinks' minimum display bpc. */
	return display_bpc != bpc;
}
4714
 
4715
/*
 * i9xx_crtc_mode_set - program a pre-PCH (gen2/3/4) pipe for a new mode.
 *
 * Picks PLL divisors for @adjusted_mode, programs FP0/FP1 and the DPLL,
 * powers the LVDS pin pair if needed, writes the pipe timing registers,
 * and finally enables the pipe and plane before flipping to the new
 * framebuffer via intel_pipe_set_base().
 *
 * Returns 0 on success or a negative error code (e.g. -EINVAL when no
 * PLL settings can be found for the requested clock).
 */
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
                  struct drm_display_mode *mode,
                  struct drm_display_mode *adjusted_mode,
                  int x, int y,
                  struct drm_framebuffer *old_fb)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;
    int refclk, num_connectors = 0;
    intel_clock_t clock, reduced_clock;
    u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
    bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
    bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
    struct drm_mode_config *mode_config = &dev->mode_config;
    struct intel_encoder *encoder;
    const intel_limit_t *limit;
    int ret;
    u32 temp;
    u32 lvds_sync = 0;

    /* Classify every encoder attached to this CRTC; the output type
     * decides refclk selection and DPLL mode bits below. */
    list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
        if (encoder->base.crtc != crtc)
            continue;

        switch (encoder->type) {
        case INTEL_OUTPUT_LVDS:
            is_lvds = true;
            break;
        case INTEL_OUTPUT_SDVO:
        case INTEL_OUTPUT_HDMI:
            is_sdvo = true;
            if (encoder->needs_tv_clock)
                is_tv = true;
            break;
        case INTEL_OUTPUT_DVO:
            is_dvo = true;
            break;
        case INTEL_OUTPUT_TVOUT:
            is_tv = true;
            break;
        case INTEL_OUTPUT_ANALOG:
            is_crt = true;
            break;
        case INTEL_OUTPUT_DISPLAYPORT:
            is_dp = true;
            break;
        }

        num_connectors++;
    }

    /* Reference clock: panel SSC for a lone LVDS output, otherwise
     * 96 MHz (gen3+) or 48 MHz (gen2). */
    if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
        refclk = dev_priv->lvds_ssc_freq * 1000;
        DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
                  refclk / 1000);
    } else if (!IS_GEN2(dev)) {
        refclk = 96000;
    } else {
        refclk = 48000;
    }

    /*
     * Returns a set of divisors for the desired target clock with the given
     * refclk, or FALSE.  The returned values represent the clock equation:
     * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
     */
    limit = intel_limit(crtc, refclk);
    ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
    if (!ok) {
        DRM_ERROR("Couldn't find PLL settings for mode!\n");
        return -EINVAL;
    }

    /* Ensure that the cursor is valid for the new mode before changing... */
//    intel_crtc_update_cursor(crtc, true);

    /* Optional LVDS downclock: only usable if the reduced clock shares
     * the same P divisor, since FP0/FP1 switching can't change P. */
    if (is_lvds && dev_priv->lvds_downclock_avail) {
        has_reduced_clock = limit->find_pll(limit, crtc,
                            dev_priv->lvds_downclock,
                            refclk,
                            &reduced_clock);
        if (has_reduced_clock && (clock.p != reduced_clock.p)) {
            /*
             * If the different P is found, it means that we can't
             * switch the display clock by using the FP0/FP1.
             * In such case we will disable the LVDS downclock
             * feature.
             */
            DRM_DEBUG_KMS("Different P is found for "
                      "LVDS clock/downclock\n");
            has_reduced_clock = 0;
        }
    }
    /* SDVO TV has fixed PLL values depend on its clock range,
       this mirrors vbios setting. */
    if (is_sdvo && is_tv) {
        if (adjusted_mode->clock >= 100000
            && adjusted_mode->clock < 140500) {
            clock.p1 = 2;
            clock.p2 = 10;
            clock.n = 3;
            clock.m1 = 16;
            clock.m2 = 8;
        } else if (adjusted_mode->clock >= 140500
               && adjusted_mode->clock <= 200000) {
            clock.p1 = 1;
            clock.p2 = 10;
            clock.n = 6;
            clock.m1 = 12;
            clock.m2 = 8;
        }
    }

    /* Pineview encodes N as a power-of-two shift in the FP register;
     * other platforms store the raw divisor fields. */
    if (IS_PINEVIEW(dev)) {
        fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
        if (has_reduced_clock)
            fp2 = (1 << reduced_clock.n) << 16 |
                reduced_clock.m1 << 8 | reduced_clock.m2;
    } else {
        fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
        if (has_reduced_clock)
            fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
                reduced_clock.m2;
    }

    dpll = DPLL_VGA_MODE_DIS;

    /* Build the DPLL control value; gen2 uses a different P1/P2 layout. */
    if (!IS_GEN2(dev)) {
        if (is_lvds)
            dpll |= DPLLB_MODE_LVDS;
        else
            dpll |= DPLLB_MODE_DAC_SERIAL;
        if (is_sdvo) {
            int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
            if (pixel_multiplier > 1) {
                if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                    dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
            }
            dpll |= DPLL_DVO_HIGH_SPEED;
        }
        if (is_dp)
            dpll |= DPLL_DVO_HIGH_SPEED;

        /* compute bitmask from p1 value */
        if (IS_PINEVIEW(dev))
            dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
        else {
            dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
            if (IS_G4X(dev) && has_reduced_clock)
                dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
        }
        switch (clock.p2) {
        case 5:
            dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
            break;
        case 7:
            dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
            break;
        case 10:
            dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
            break;
        case 14:
            dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
            break;
        }
        if (INTEL_INFO(dev)->gen >= 4)
            dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
    } else {
        if (is_lvds) {
            dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
        } else {
            if (clock.p1 == 2)
                dpll |= PLL_P1_DIVIDE_BY_TWO;
            else
                dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
            if (clock.p2 == 4)
                dpll |= PLL_P2_DIVIDE_BY_4;
        }
    }

    /* Select the DPLL reference input. */
    if (is_sdvo && is_tv)
        dpll |= PLL_REF_INPUT_TVCLKINBC;
    else if (is_tv)
        /* XXX: just matching BIOS for now */
        /*  dpll |= PLL_REF_INPUT_TVCLKINBC; */
        dpll |= 3;
    else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
        dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
    else
        dpll |= PLL_REF_INPUT_DREFCLK;

    /* setup pipeconf */
    pipeconf = I915_READ(PIPECONF(pipe));

    /* Set up the display plane register */
    dspcntr = DISPPLANE_GAMMA_ENABLE;

    /* Ironlake's plane is forced to pipe, bit 24 is to
       enable color space conversion */
    if (pipe == 0)
        dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
    else
        dspcntr |= DISPPLANE_SEL_PIPE_B;

    if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
        /* Enable pixel doubling when the dot clock is > 90% of the (display)
         * core speed.
         *
         * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
         * pipe == 0 check?
         */
        if (mode->clock >
            dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
            pipeconf |= PIPECONF_DOUBLE_WIDE;
        else
            pipeconf &= ~PIPECONF_DOUBLE_WIDE;
    }

    dpll |= DPLL_VCO_ENABLE;

    DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
    drm_mode_debug_printmodeline(mode);

    /* Write divisors, then the DPLL with the VCO still disabled so the
     * PLL can be set up before being turned on. */
    I915_WRITE(FP0(pipe), fp);
    I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);

    POSTING_READ(DPLL(pipe));
    udelay(150);

    /* The LVDS pin pair needs to be on before the DPLLs are enabled.
     * This is an exception to the general rule that mode_set doesn't turn
     * things on.
     */
    if (is_lvds) {
        temp = I915_READ(LVDS);
        temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
        if (pipe == 1) {
            temp |= LVDS_PIPEB_SELECT;
        } else {
            temp &= ~LVDS_PIPEB_SELECT;
        }
        /* set the corresponding LVDS_BORDER bit */
        temp |= dev_priv->lvds_border_bits;
        /* Set the B0-B3 data pairs corresponding to whether we're going to
         * set the DPLLs for dual-channel mode or not.
         */
        if (clock.p2 == 7)
            temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
        else
            temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

        /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
         * appropriately here, but we need to look more thoroughly into how
         * panels behave in the two modes.
         */
        /* set the dithering flag on LVDS as needed */
        if (INTEL_INFO(dev)->gen >= 4) {
            if (dev_priv->lvds_dither)
                temp |= LVDS_ENABLE_DITHER;
            else
                temp &= ~LVDS_ENABLE_DITHER;
        }
        if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
            lvds_sync |= LVDS_HSYNC_POLARITY;
        if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
            lvds_sync |= LVDS_VSYNC_POLARITY;
        if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
            != lvds_sync) {
            char flags[2] = "-+";
            DRM_INFO("Changing LVDS panel from "
                 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
                 flags[!(temp & LVDS_HSYNC_POLARITY)],
                 flags[!(temp & LVDS_VSYNC_POLARITY)],
                 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
                 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
            temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
            temp |= lvds_sync;
        }
        I915_WRITE(LVDS, temp);
    }

    if (is_dp) {
        intel_dp_set_m_n(crtc, mode, adjusted_mode);
    }

    I915_WRITE(DPLL(pipe), dpll);

    /* Wait for the clocks to stabilize. */
    POSTING_READ(DPLL(pipe));
    udelay(150);

    if (INTEL_INFO(dev)->gen >= 4) {
        /* Gen4+ carries the SDVO pixel multiplier in DPLL_MD. */
        temp = 0;
        if (is_sdvo) {
            temp = intel_mode_get_pixel_multiplier(adjusted_mode);
            if (temp > 1)
                temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
            else
                temp = 0;
        }
        I915_WRITE(DPLL_MD(pipe), temp);
    } else {
        /* The pixel multiplier can only be updated once the
         * DPLL is enabled and the clocks are stable.
         *
         * So write it again.
         */
        I915_WRITE(DPLL(pipe), dpll);
    }

    /* FP1 holds the downclocked divisors when LVDS downclocking is in
     * use (and i915_powersave allows it); otherwise mirror FP0. */
    intel_crtc->lowfreq_avail = false;
    if (is_lvds && has_reduced_clock && i915_powersave) {
        I915_WRITE(FP1(pipe), fp2);
        intel_crtc->lowfreq_avail = true;
        if (HAS_PIPE_CXSR(dev)) {
            DRM_DEBUG_KMS("enabling CxSR downclocking\n");
            pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
        }
    } else {
        I915_WRITE(FP1(pipe), fp);
        if (HAS_PIPE_CXSR(dev)) {
            DRM_DEBUG_KMS("disabling CxSR downclocking\n");
            pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
        }
    }

    if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
        pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
        /* the chip adds 2 halflines automatically */
        adjusted_mode->crtc_vdisplay -= 1;
        adjusted_mode->crtc_vtotal -= 1;
        adjusted_mode->crtc_vblank_start -= 1;
        adjusted_mode->crtc_vblank_end -= 1;
        adjusted_mode->crtc_vsync_end -= 1;
        adjusted_mode->crtc_vsync_start -= 1;
    } else
        pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */

    /* Pipe timing registers take (value - 1), low/high halves packed
     * into one 32-bit word. */
    I915_WRITE(HTOTAL(pipe),
           (adjusted_mode->crtc_hdisplay - 1) |
           ((adjusted_mode->crtc_htotal - 1) << 16));
    I915_WRITE(HBLANK(pipe),
           (adjusted_mode->crtc_hblank_start - 1) |
           ((adjusted_mode->crtc_hblank_end - 1) << 16));
    I915_WRITE(HSYNC(pipe),
           (adjusted_mode->crtc_hsync_start - 1) |
           ((adjusted_mode->crtc_hsync_end - 1) << 16));

    I915_WRITE(VTOTAL(pipe),
           (adjusted_mode->crtc_vdisplay - 1) |
           ((adjusted_mode->crtc_vtotal - 1) << 16));
    I915_WRITE(VBLANK(pipe),
           (adjusted_mode->crtc_vblank_start - 1) |
           ((adjusted_mode->crtc_vblank_end - 1) << 16));
    I915_WRITE(VSYNC(pipe),
           (adjusted_mode->crtc_vsync_start - 1) |
           ((adjusted_mode->crtc_vsync_end - 1) << 16));

    /* pipesrc and dspsize control the size that is scaled from,
     * which should always be the user's requested size.
     */
    I915_WRITE(DSPSIZE(plane),
           ((mode->vdisplay - 1) << 16) |
           (mode->hdisplay - 1));
    I915_WRITE(DSPPOS(plane), 0);
    I915_WRITE(PIPESRC(pipe),
           ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));

    /* Commit PIPECONF, then turn the pipe and plane on. */
    I915_WRITE(PIPECONF(pipe), pipeconf);
    POSTING_READ(PIPECONF(pipe));
    intel_enable_pipe(dev_priv, pipe, false);

    intel_wait_for_vblank(dev, pipe);

    I915_WRITE(DSPCNTR(plane), dspcntr);
    POSTING_READ(DSPCNTR(plane));
    intel_enable_plane(dev_priv, plane, pipe);

    /* Point the plane at the new framebuffer at offset (x, y). */
    ret = intel_pipe_set_base(crtc, x, y, old_fb);

    intel_update_watermarks(dev);

    return ret;
}
5102
 
5103
/*
 * ironlake_update_pch_refclk - configure the PCH display reference clock.
 *
 * Scans all enabled CRTCs for LVDS/eDP encoders and programs
 * PCH_DREF_CONTROL accordingly: the non-spread source is always enabled,
 * SSC is enabled, and the CPU source output is routed for CPU-attached
 * eDP (with or without downspread depending on panel SSC use).
 *
 * Each register write is followed by a posting read and a 200us delay,
 * as the hardware requires settling time between DREF changes.
 */
static void ironlake_update_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_encoder *has_edp_encoder = NULL;
	u32 temp;
	bool has_lvds = false;	/* NOTE(review): set below but never read */

	/* We need to take the global config into account */
	list_for_each_entry(crtc, &mode_config->crtc_list, head) {
		if (!crtc->enabled)
			continue;

		list_for_each_entry(encoder, &mode_config->encoder_list,
				    base.head) {
			if (encoder->base.crtc != crtc)
				continue;

			switch (encoder->type) {
			case INTEL_OUTPUT_LVDS:
				has_lvds = true;
				/* NOTE(review): no break here, so an LVDS
				 * encoder also sets has_edp_encoder below.
				 * This matches the upstream code of this
				 * vintage but looks suspicious — confirm
				 * whether the fallthrough is intentional. */
			case INTEL_OUTPUT_EDP:
				has_edp_encoder = encoder;
				break;
			}
		}
	}

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	temp = I915_READ(PCH_DREF_CONTROL);
	/* Always enable nonspread source */
	temp &= ~DREF_NONSPREAD_SOURCE_MASK;
	temp |= DREF_NONSPREAD_SOURCE_ENABLE;
	temp &= ~DREF_SSC_SOURCE_MASK;
	temp |= DREF_SSC_SOURCE_ENABLE;
	I915_WRITE(PCH_DREF_CONTROL, temp);

	POSTING_READ(PCH_DREF_CONTROL);
	udelay(200);

	if (has_edp_encoder) {
		if (intel_panel_use_ssc(dev_priv)) {
			temp |= DREF_SSC1_ENABLE;
			I915_WRITE(PCH_DREF_CONTROL, temp);

			POSTING_READ(PCH_DREF_CONTROL);
			udelay(200);
		}
		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
			if (intel_panel_use_ssc(dev_priv))
				temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else {
			/* Enable SSC on PCH eDP if needed */
			if (intel_panel_use_ssc(dev_priv)) {
				DRM_ERROR("enabling SSC on PCH\n");
				temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
			}
		}
		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	}
}
5177
 
5178
/*
 * ironlake_crtc_mode_set - program an Ironlake/PCH-split pipe for a mode.
 *
 * In addition to the PLL and pipe-timing setup done on older platforms,
 * this computes the FDI link configuration (lane count and M/N values),
 * selects the pipe color depth via intel_choose_pipe_bpp_dither(), sets
 * up the PCH reference clock, and programs the PCH DPLL/transcoder
 * registers.  CPU-attached eDP bypasses the FDI/PCH DPLL paths.
 *
 * Returns 0 on success or -EINVAL when no PLL settings fit the mode.
 */
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
                  struct drm_display_mode *mode,
                  struct drm_display_mode *adjusted_mode,
                  int x, int y,
                  struct drm_framebuffer *old_fb)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;
    int refclk, num_connectors = 0;
    intel_clock_t clock, reduced_clock;
    u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
    bool ok, has_reduced_clock = false, is_sdvo = false;
    bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
    struct intel_encoder *has_edp_encoder = NULL;
    struct drm_mode_config *mode_config = &dev->mode_config;
    struct intel_encoder *encoder;
    const intel_limit_t *limit;
    int ret;
    struct fdi_m_n m_n = {0};
    u32 temp;
    u32 lvds_sync = 0;
    int target_clock, pixel_multiplier, lane, link_bw, factor;
    unsigned int pipe_bpp;
    bool dither;

    ENTER();

    /* Classify the encoders attached to this CRTC. */
    list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
        if (encoder->base.crtc != crtc)
            continue;

        switch (encoder->type) {
        case INTEL_OUTPUT_LVDS:
            is_lvds = true;
            break;
        case INTEL_OUTPUT_SDVO:
        case INTEL_OUTPUT_HDMI:
            is_sdvo = true;
            if (encoder->needs_tv_clock)
                is_tv = true;
            break;
        case INTEL_OUTPUT_TVOUT:
            is_tv = true;
            break;
        case INTEL_OUTPUT_ANALOG:
            is_crt = true;
            break;
        case INTEL_OUTPUT_DISPLAYPORT:
            is_dp = true;
            break;
        case INTEL_OUTPUT_EDP:
            has_edp_encoder = encoder;
            break;
        }

        num_connectors++;
    }

    /* Reference clock: panel SSC for a lone LVDS output, 96 MHz for
     * CPU eDP, 120 MHz otherwise. */
    if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
        refclk = dev_priv->lvds_ssc_freq * 1000;
        DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
                  refclk / 1000);
    } else {
        refclk = 96000;
        if (!has_edp_encoder ||
            intel_encoder_is_pch_edp(&has_edp_encoder->base))
            refclk = 120000; /* 120Mhz refclk */
    }

    /*
     * Returns a set of divisors for the desired target clock with the given
     * refclk, or FALSE.  The returned values represent the clock equation:
     * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
     */
    limit = intel_limit(crtc, refclk);
    ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
    if (!ok) {
        DRM_ERROR("Couldn't find PLL settings for mode!\n");
        return -EINVAL;
    }

    /* Ensure that the cursor is valid for the new mode before changing... */
//    intel_crtc_update_cursor(crtc, true);

    /* Optional LVDS downclock; usable only if the reduced clock shares
     * the same P divisor (FP0/FP1 switching can't change P). */
    if (is_lvds && dev_priv->lvds_downclock_avail) {
        has_reduced_clock = limit->find_pll(limit, crtc,
                            dev_priv->lvds_downclock,
                            refclk,
                            &reduced_clock);
        if (has_reduced_clock && (clock.p != reduced_clock.p)) {
            /*
             * If the different P is found, it means that we can't
             * switch the display clock by using the FP0/FP1.
             * In such case we will disable the LVDS downclock
             * feature.
             */
            DRM_DEBUG_KMS("Different P is found for "
                      "LVDS clock/downclock\n");
            has_reduced_clock = 0;
        }
    }
    /* SDVO TV has fixed PLL values depend on its clock range,
       this mirrors vbios setting. */
    if (is_sdvo && is_tv) {
        if (adjusted_mode->clock >= 100000
            && adjusted_mode->clock < 140500) {
            clock.p1 = 2;
            clock.p2 = 10;
            clock.n = 3;
            clock.m1 = 16;
            clock.m2 = 8;
        } else if (adjusted_mode->clock >= 140500
               && adjusted_mode->clock <= 200000) {
            clock.p1 = 1;
            clock.p2 = 10;
            clock.n = 6;
            clock.m1 = 12;
            clock.m2 = 8;
        }
    }

    /* FDI link */
    pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
    lane = 0;
    /* CPU eDP doesn't require FDI link, so just set DP M/N
       according to current link config */
    if (has_edp_encoder &&
        !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
        target_clock = mode->clock;
        intel_edp_link_config(has_edp_encoder,
                      &lane, &link_bw);
    } else {
        /* [e]DP over FDI requires target mode clock
           instead of link clock */
        /* NOTE(review): if no eDP encoder is attached and is_dp is
         * false, has_edp_encoder is NULL here and
         * intel_encoder_is_pch_edp(&NULL->base) is evaluated —
         * matches upstream of this vintage, but verify it cannot
         * dereference the null pointer. */
        if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
            target_clock = mode->clock;
        else
            target_clock = adjusted_mode->clock;

        /* FDI is a binary signal running at ~2.7GHz, encoding
         * each output octet as 10 bits. The actual frequency
         * is stored as a divider into a 100MHz clock, and the
         * mode pixel clock is stored in units of 1KHz.
         * Hence the bw of each lane in terms of the mode signal
         * is:
         */
        link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
    }

    /* determine panel color depth */
    temp = I915_READ(PIPECONF(pipe));
    temp &= ~PIPE_BPC_MASK;
    dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp);
    switch (pipe_bpp) {
    case 18:
        temp |= PIPE_6BPC;
        break;
    case 24:
        temp |= PIPE_8BPC;
        break;
    case 30:
        temp |= PIPE_10BPC;
        break;
    case 36:
        temp |= PIPE_12BPC;
        break;
    default:
        WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
            pipe_bpp);
        temp |= PIPE_8BPC;
        pipe_bpp = 24;
        break;
    }

    intel_crtc->bpp = pipe_bpp;
    I915_WRITE(PIPECONF(pipe), temp);

    if (!lane) {
        /*
         * Account for spread spectrum to avoid
         * oversubscribing the link. Max center spread
         * is 2.5%; use 5% for safety's sake.
         */
        u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
        lane = bps / (link_bw * 8) + 1;
    }

    intel_crtc->fdi_lanes = lane;

    if (pixel_multiplier > 1)
        link_bw *= pixel_multiplier;
    ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
                 &m_n);

    /* Configure the PCH display reference clock before touching DPLLs. */
    ironlake_update_pch_refclk(dev);

    fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
    if (has_reduced_clock)
        fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
            reduced_clock.m2;

    /* Enable autotuning of the PLL clock (if permissible) */
    factor = 21;
    if (is_lvds) {
        if ((intel_panel_use_ssc(dev_priv) &&
             dev_priv->lvds_ssc_freq == 100) ||
            (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
            factor = 25;
    } else if (is_sdvo && is_tv)
        factor = 20;

    if (clock.m < factor * clock.n)
        fp |= FP_CB_TUNE;

    /* Build the PCH DPLL control value. */
    dpll = 0;

    if (is_lvds)
        dpll |= DPLLB_MODE_LVDS;
    else
        dpll |= DPLLB_MODE_DAC_SERIAL;
    if (is_sdvo) {
        int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
        if (pixel_multiplier > 1) {
            dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
        }
        dpll |= DPLL_DVO_HIGH_SPEED;
    }
    if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
        dpll |= DPLL_DVO_HIGH_SPEED;

    /* compute bitmask from p1 value */
    dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
    /* also FPA1 */
    dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

    switch (clock.p2) {
    case 5:
        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
        break;
    case 7:
        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
        break;
    case 10:
        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
        break;
    case 14:
        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
        break;
    }

    /* Select the DPLL reference input. */
    if (is_sdvo && is_tv)
        dpll |= PLL_REF_INPUT_TVCLKINBC;
    else if (is_tv)
        /* XXX: just matching BIOS for now */
        /*  dpll |= PLL_REF_INPUT_TVCLKINBC; */
        dpll |= 3;
    else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
        dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
    else
        dpll |= PLL_REF_INPUT_DREFCLK;

    /* setup pipeconf */
    pipeconf = I915_READ(PIPECONF(pipe));

    /* Set up the display plane register */
    dspcntr = DISPPLANE_GAMMA_ENABLE;

    DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
    drm_mode_debug_printmodeline(mode);

    /* PCH eDP needs FDI, but CPU eDP does not */
    if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
        I915_WRITE(PCH_FP0(pipe), fp);
        I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);

        POSTING_READ(PCH_DPLL(pipe));
        udelay(150);
    }

    /* enable transcoder DPLL */
    if (HAS_PCH_CPT(dev)) {
        temp = I915_READ(PCH_DPLL_SEL);
        switch (pipe) {
        case 0:
            temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL;
            break;
        case 1:
            temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL;
            break;
        case 2:
            /* FIXME: manage transcoder PLLs? */
            temp |= TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL;
            break;
        default:
            BUG();
        }
        I915_WRITE(PCH_DPLL_SEL, temp);

        POSTING_READ(PCH_DPLL_SEL);
        udelay(150);
    }

    /* The LVDS pin pair needs to be on before the DPLLs are enabled.
     * This is an exception to the general rule that mode_set doesn't turn
     * things on.
     */
    if (is_lvds) {
        temp = I915_READ(PCH_LVDS);
        temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
        if (pipe == 1) {
            if (HAS_PCH_CPT(dev))
                temp |= PORT_TRANS_B_SEL_CPT;
            else
                temp |= LVDS_PIPEB_SELECT;
        } else {
            if (HAS_PCH_CPT(dev))
                temp &= ~PORT_TRANS_SEL_MASK;
            else
                temp &= ~LVDS_PIPEB_SELECT;
        }
        /* set the corresponding LVDS_BORDER bit */
        temp |= dev_priv->lvds_border_bits;
        /* Set the B0-B3 data pairs corresponding to whether we're going to
         * set the DPLLs for dual-channel mode or not.
         */
        if (clock.p2 == 7)
            temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
        else
            temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

        /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
         * appropriately here, but we need to look more thoroughly into how
         * panels behave in the two modes.
         */
        if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
            lvds_sync |= LVDS_HSYNC_POLARITY;
        if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
            lvds_sync |= LVDS_VSYNC_POLARITY;
        if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
            != lvds_sync) {
            char flags[2] = "-+";
            DRM_INFO("Changing LVDS panel from "
                 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
                 flags[!(temp & LVDS_HSYNC_POLARITY)],
                 flags[!(temp & LVDS_VSYNC_POLARITY)],
                 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
                 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
            temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
            temp |= lvds_sync;
        }
        I915_WRITE(PCH_LVDS, temp);
    }

    /* Dithering: on if the panel needs it (LVDS dither flag) or if the
     * chosen pipe bpp is shallower than the framebuffer depth. */
    pipeconf &= ~PIPECONF_DITHER_EN;
    pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
    if ((is_lvds && dev_priv->lvds_dither) || dither) {
        pipeconf |= PIPECONF_DITHER_EN;
        pipeconf |= PIPECONF_DITHER_TYPE_ST1;
    }
    if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
        intel_dp_set_m_n(crtc, mode, adjusted_mode);
    } else {
        /* For non-DP output, clear any trans DP clock recovery setting.*/
        I915_WRITE(TRANSDATA_M1(pipe), 0);
        I915_WRITE(TRANSDATA_N1(pipe), 0);
        I915_WRITE(TRANSDPLINK_M1(pipe), 0);
        I915_WRITE(TRANSDPLINK_N1(pipe), 0);
    }

    if (!has_edp_encoder ||
        intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
        I915_WRITE(PCH_DPLL(pipe), dpll);

        /* Wait for the clocks to stabilize. */
        POSTING_READ(PCH_DPLL(pipe));
        udelay(150);

        /* The pixel multiplier can only be updated once the
         * DPLL is enabled and the clocks are stable.
         *
         * So write it again.
         */
        I915_WRITE(PCH_DPLL(pipe), dpll);
    }

    /* PCH_FP1 holds the downclocked divisors for LVDS downclocking
     * (when i915_powersave permits); otherwise mirror PCH_FP0. */
    intel_crtc->lowfreq_avail = false;
    if (is_lvds && has_reduced_clock && i915_powersave) {
        I915_WRITE(PCH_FP1(pipe), fp2);
        intel_crtc->lowfreq_avail = true;
        if (HAS_PIPE_CXSR(dev)) {
            DRM_DEBUG_KMS("enabling CxSR downclocking\n");
            pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
        }
    } else {
        I915_WRITE(PCH_FP1(pipe), fp);
        if (HAS_PIPE_CXSR(dev)) {
            DRM_DEBUG_KMS("disabling CxSR downclocking\n");
            pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
        }
    }

    if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
        pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
        /* the chip adds 2 halflines automatically */
        adjusted_mode->crtc_vdisplay -= 1;
        adjusted_mode->crtc_vtotal -= 1;
        adjusted_mode->crtc_vblank_start -= 1;
        adjusted_mode->crtc_vblank_end -= 1;
        adjusted_mode->crtc_vsync_end -= 1;
        adjusted_mode->crtc_vsync_start -= 1;
    } else
        pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */

    /* Pipe timing registers take (value - 1), two halves per word. */
    I915_WRITE(HTOTAL(pipe),
           (adjusted_mode->crtc_hdisplay - 1) |
           ((adjusted_mode->crtc_htotal - 1) << 16));
    I915_WRITE(HBLANK(pipe),
           (adjusted_mode->crtc_hblank_start - 1) |
           ((adjusted_mode->crtc_hblank_end - 1) << 16));
    I915_WRITE(HSYNC(pipe),
           (adjusted_mode->crtc_hsync_start - 1) |
           ((adjusted_mode->crtc_hsync_end - 1) << 16));

    I915_WRITE(VTOTAL(pipe),
           (adjusted_mode->crtc_vdisplay - 1) |
           ((adjusted_mode->crtc_vtotal - 1) << 16));
    I915_WRITE(VBLANK(pipe),
           (adjusted_mode->crtc_vblank_start - 1) |
           ((adjusted_mode->crtc_vblank_end - 1) << 16));
    I915_WRITE(VSYNC(pipe),
           (adjusted_mode->crtc_vsync_start - 1) |
           ((adjusted_mode->crtc_vsync_end - 1) << 16));

    /* pipesrc controls the size that is scaled from, which should
     * always be the user's requested size.
     */
    I915_WRITE(PIPESRC(pipe),
           ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));

    /* Program the FDI data/link M/N values computed earlier. */
    I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
    I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
    I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
    I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);

    if (has_edp_encoder &&
        !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
        ironlake_set_pll_edp(crtc, adjusted_mode->clock);
    }

    I915_WRITE(PIPECONF(pipe), pipeconf);
    POSTING_READ(PIPECONF(pipe));

    intel_wait_for_vblank(dev, pipe);

    if (IS_GEN5(dev)) {
        /* enable address swizzle for tiling buffer */
        temp = I915_READ(DISP_ARB_CTL);
        I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
    }

    I915_WRITE(DSPCNTR(plane), dspcntr);
    POSTING_READ(DSPCNTR(plane));

    /* Point the plane at the new framebuffer at offset (x, y). */
    ret = intel_pipe_set_base(crtc, x, y, old_fb);

    dbgprintf("Set base\n");

    intel_update_watermarks(dev);

    LEAVE();

    return ret;
}
5654
 
2330 Serge 5655
static int intel_crtc_mode_set(struct drm_crtc *crtc,
5656
			       struct drm_display_mode *mode,
5657
			       struct drm_display_mode *adjusted_mode,
5658
			       int x, int y,
5659
			       struct drm_framebuffer *old_fb)
5660
{
5661
	struct drm_device *dev = crtc->dev;
5662
	struct drm_i915_private *dev_priv = dev->dev_private;
5663
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5664
	int pipe = intel_crtc->pipe;
5665
	int ret;
2327 Serge 5666
 
2330 Serge 5667
//	drm_vblank_pre_modeset(dev, pipe);
2336 Serge 5668
    ENTER();
2327 Serge 5669
 
2330 Serge 5670
	ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
5671
					      x, y, old_fb);
2327 Serge 5672
 
2330 Serge 5673
//	drm_vblank_post_modeset(dev, pipe);
2327 Serge 5674
 
2330 Serge 5675
	intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
2336 Serge 5676
    LEAVE();
2327 Serge 5677
 
2330 Serge 5678
	return ret;
5679
}
2327 Serge 5680
 
5681
/** Loads the palette/gamma unit for the CRTC with the prepared values */
5682
void intel_crtc_load_lut(struct drm_crtc *crtc)
5683
{
5684
	struct drm_device *dev = crtc->dev;
5685
	struct drm_i915_private *dev_priv = dev->dev_private;
5686
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5687
	int palreg = PALETTE(intel_crtc->pipe);
5688
	int i;
5689
 
5690
	/* The clocks have to be on to load the palette. */
5691
	if (!crtc->enabled)
5692
		return;
5693
 
5694
	/* use legacy palette for Ironlake */
5695
	if (HAS_PCH_SPLIT(dev))
5696
		palreg = LGC_PALETTE(intel_crtc->pipe);
5697
 
5698
	for (i = 0; i < 256; i++) {
5699
		I915_WRITE(palreg + 4 * i,
5700
			   (intel_crtc->lut_r[i] << 16) |
5701
			   (intel_crtc->lut_g[i] << 8) |
5702
			   intel_crtc->lut_b[i]);
5703
	}
5704
}
5705
 
5706
 
5707
 
5708
 
5709
 
5710
 
5711
 
5712
 
5713
 
5714
 
5715
 
5716
 
5717
 
5718
 
5719
 
5720
 
5721
 
5722
 
5723
 
5724
 
5725
 
5726
 
5727
 
5728
 
5729
 
5730
 
5731
 
5732
 
5733
 
5734
 
5735
 
5736
 
5737
 
5738
 
5739
 
5740
 
5741
 
2332 Serge 5742
/** Sets the color ramps on behalf of RandR */
5743
void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
5744
				 u16 blue, int regno)
5745
{
5746
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2327 Serge 5747
 
2332 Serge 5748
	intel_crtc->lut_r[regno] = red >> 8;
5749
	intel_crtc->lut_g[regno] = green >> 8;
5750
	intel_crtc->lut_b[regno] = blue >> 8;
5751
}
2327 Serge 5752
 
2332 Serge 5753
/* Inverse of intel_crtc_fb_gamma_set: report the stored 8-bit LUT
 * entries widened back to the 16-bit range RandR expects. */
void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
			     u16 *blue, int regno)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Widen 8-bit entries by shifting into the high byte. */
	*blue = intel_crtc->lut_b[regno] << 8;
	*green = intel_crtc->lut_g[regno] << 8;
	*red = intel_crtc->lut_r[regno] << 8;
}
2327 Serge 5762
 
2330 Serge 5763
/* drm_crtc_funcs.gamma_set: copy the userspace ramps into the shadow
 * LUT (clamped to its 256 entries) and push them to the hardware. */
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				 u16 *blue, uint32_t start, uint32_t size)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int end = start + size;
	int i;

	/* Never run past the 256-entry LUT. */
	if (end > 256)
		end = 256;

	for (i = start; i < end; i++) {
		intel_crtc->lut_r[i] = red[i] >> 8;
		intel_crtc->lut_g[i] = green[i] >> 8;
		intel_crtc->lut_b[i] = blue[i] >> 8;
	}

	intel_crtc_load_lut(crtc);
}
2327 Serge 5777
 
2330 Serge 5778
/**
5779
 * Get a pipe with a simple mode set on it for doing load-based monitor
5780
 * detection.
5781
 *
5782
 * It will be up to the load-detect code to adjust the pipe as appropriate for
5783
 * its requirements.  The pipe will be connected to no other encoders.
5784
 *
5785
 * Currently this code will only succeed if there is a pipe with no encoders
5786
 * configured for it.  In the future, it could choose to temporarily disable
5787
 * some outputs to free up a pipe for its use.
5788
 *
5789
 * \return crtc, or NULL if no pipes are available.
5790
 */
2327 Serge 5791
 
2330 Serge 5792
/* VESA 640x480x72Hz mode to set on the pipe */
/* Conservative, universally supported timings (31.5 MHz dot clock,
 * negative h/v sync) used only while doing load-based output detection. */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
2327 Serge 5797
 
5798
 
5799
 
5800
 
5801
 
2330 Serge 5802
static u32
5803
intel_framebuffer_pitch_for_width(int width, int bpp)
5804
{
5805
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
5806
	return ALIGN(pitch, 64);
5807
}
2327 Serge 5808
 
2330 Serge 5809
static u32
5810
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
5811
{
5812
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
5813
	return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
5814
}
2327 Serge 5815
 
2330 Serge 5816
/* Allocate a throw-away framebuffer big enough for @mode.
 *
 * NOTE(port): GEM-backed allocation is not wired up in this port yet, so
 * the function unconditionally fails with ERR_PTR(-ENOMEM).  The
 * commented-out code below is the upstream implementation, kept as a
 * reference for when i915_gem_alloc_object() becomes available; the two
 * locals exist only to match it and are currently unused. */
static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
				  struct drm_display_mode *mode,
				  int depth, int bpp)
{
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd mode_cmd;

//	obj = i915_gem_alloc_object(dev,
//				    intel_framebuffer_size_for_mode(mode, bpp));
//	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

//	mode_cmd.width = mode->hdisplay;
//	mode_cmd.height = mode->vdisplay;
//	mode_cmd.depth = depth;
//	mode_cmd.bpp = bpp;
//	mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp);

//	return intel_framebuffer_create(dev, &mode_cmd, obj);
}
2327 Serge 5837
 
2330 Serge 5838
/* Check whether the fbdev framebuffer can be reused for @mode.
 *
 * NOTE(port): fbdev reuse is not wired up in this port yet, so the
 * function always returns NULL ("does not fit"), forcing the caller to
 * allocate a temporary framebuffer instead.  The commented-out code is
 * the upstream implementation kept for reference. */
static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
		   struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

//	if (dev_priv->fbdev == NULL)
//		return NULL;

//	obj = dev_priv->fbdev->ifb.obj;
//	if (obj == NULL)
//		return NULL;

//	fb = &dev_priv->fbdev->ifb.base;
//	if (fb->pitch < intel_framebuffer_pitch_for_width(mode->hdisplay,
//							  fb->bits_per_pixel))
		return NULL;

//	if (obj->base.size < mode->vdisplay * fb->pitch)
//		return NULL;

//	return fb;
}
2327 Serge 5863
 
2330 Serge 5864
/* Attach @intel_encoder/@connector to a CRTC running @mode (or the VESA
 * 640x480 fallback) so load-based detection can be performed.  State
 * needed to undo this is saved in @old for
 * intel_release_load_detect_pipe().  Returns true on success. */
bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
				struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old)
{
	struct intel_crtc *intel_crtc;
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *old_fb;
	int i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		/* Nothing temporary was set up: record that so the release
		 * path only restores the previous DPMS state. */
		intel_crtc = to_intel_crtc(crtc);
		old->dpms_mode = intel_crtc->dpms_mode;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
			struct drm_encoder_helper_funcs *encoder_funcs;
			struct drm_crtc_helper_funcs *crtc_funcs;

			crtc_funcs = crtc->helper_private;
			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);

			encoder_funcs = encoder->helper_private;
			encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
		}

		return true;
	}

	/* Find an unused one (if possible) */
	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
		i++;
		/* Skip CRTCs this encoder cannot drive. */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (!possible_crtc->enabled) {
			crtc = possible_crtc;
			break;
		}
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		return false;
	}

	/* Temporarily wire the encoder/connector to the chosen CRTC. */
	encoder->crtc = crtc;
	connector->encoder = encoder;

	intel_crtc = to_intel_crtc(crtc);
	old->dpms_mode = intel_crtc->dpms_mode;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	if (!mode)
		mode = &load_detect_mode;

	old_fb = crtc->fb;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	crtc->fb = mode_fits_in_fbdev(dev, mode);
	if (crtc->fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		/* release_fb marks the temporary fb for destruction on
		 * release (also stores an ERR_PTR on failure, checked next). */
		crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
		old->release_fb = crtc->fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(crtc->fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		crtc->fb = old_fb;
		return false;
	}

	if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		crtc->fb = old_fb;
		return false;
	}

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	return true;
}
2327 Serge 5979
 
2330 Serge 5980
void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
5981
				    struct drm_connector *connector,
5982
				    struct intel_load_detect_pipe *old)
5983
{
5984
	struct drm_encoder *encoder = &intel_encoder->base;
5985
	struct drm_device *dev = encoder->dev;
5986
	struct drm_crtc *crtc = encoder->crtc;
5987
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
5988
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
2327 Serge 5989
 
2330 Serge 5990
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
5991
		      connector->base.id, drm_get_connector_name(connector),
5992
		      encoder->base.id, drm_get_encoder_name(encoder));
2327 Serge 5993
 
2330 Serge 5994
	if (old->load_detect_temp) {
5995
		connector->encoder = NULL;
5996
		drm_helper_disable_unused_functions(dev);
2327 Serge 5997
 
2330 Serge 5998
		if (old->release_fb)
5999
			old->release_fb->funcs->destroy(old->release_fb);
2327 Serge 6000
 
2330 Serge 6001
		return;
6002
	}
2327 Serge 6003
 
2330 Serge 6004
	/* Switch crtc and encoder back off if necessary */
6005
	if (old->dpms_mode != DRM_MODE_DPMS_ON) {
6006
		encoder_funcs->dpms(encoder, old->dpms_mode);
6007
		crtc_funcs->dpms(crtc, old->dpms_mode);
6008
	}
6009
}
2327 Serge 6010
 
2330 Serge 6011
/* Returns the clock of the currently programmed mode of the given pipe. */
6012
/* Reverse-engineer the pixel clock (in kHz) from the DPLL/FP registers
 * currently programmed for @crtc's pipe.  Reads the divider fields,
 * reconstructs m1/m2/n/p1/p2 and feeds them through intel_clock() with
 * the assumed reference clock.  Returns 0 if the DPLL mode is unknown. */
static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 dpll = I915_READ(DPLL(pipe));
	u32 fp;
	intel_clock_t clock;

	/* FP0 or FP1 is selected by the rate-select bit. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = I915_READ(FP0(pipe));
	else
		fp = I915_READ(FP1(pipe));

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		/* Pineview encodes N and M2 differently (N as a one-hot). */
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* P1 is stored one-hot; ffs() recovers the divider. */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return 0;
		}

		/* XXX: Handle the 100Mhz refclk */
		intel_clock(dev, 96000, &clock);
	} else {
		/* Gen2: pipe B with the LVDS port enabled implies LVDS. */
		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
			clock.p2 = 14;

			if ((dpll & PLL_REF_INPUT_MASK) ==
			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
				/* XXX: might not be 66MHz */
				intel_clock(dev, 66000, &clock);
			} else
				intel_clock(dev, 48000, &clock);
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;

			intel_clock(dev, 48000, &clock);
		}
	}

	/* XXX: It would be nice to validate the clocks, but we can't reuse
	 * i830PllIsValid() because it relies on the xf86_config connector
	 * configuration being accurate, which it isn't necessarily.
	 */

	return clock.dot;
}
2327 Serge 6097
 
2330 Serge 6098
/** Returns the currently programmed mode of the given pipe. */
6099
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct drm_display_mode *mode;
	/* Snapshot the pipe timing registers; each packs two 16-bit
	 * fields (low = active/start, high = total/end), stored as
	 * value-minus-one. */
	int htot = I915_READ(HTOTAL(pipe));
	int hsync = I915_READ(HSYNC(pipe));
	int vtot = I915_READ(VTOTAL(pipe));
	int vsync = I915_READ(VSYNC(pipe));

	/* Caller takes ownership of the returned mode (kfree / drm mode
	 * destroy — presumably the DRM core's usual rules apply; confirm
	 * against callers). */
	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	mode->clock = intel_crtc_clock_get(dev, crtc);
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);
	drm_mode_set_crtcinfo(mode, 0);

	return mode;
}
6130
 
6131
#define GPU_IDLE_TIMEOUT 500 /* ms */
6132
 
6133
 
6134
 
6135
 
6136
#define CRTC_IDLE_TIMEOUT 1000 /* ms */
6137
 
6138
 
6139
 
6140
 
2327 Serge 6141
static void intel_increase_pllclock(struct drm_crtc *crtc)
6142
{
6143
	struct drm_device *dev = crtc->dev;
6144
	drm_i915_private_t *dev_priv = dev->dev_private;
6145
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6146
	int pipe = intel_crtc->pipe;
6147
	int dpll_reg = DPLL(pipe);
6148
	int dpll;
6149
 
2336 Serge 6150
    ENTER();
6151
 
2327 Serge 6152
	if (HAS_PCH_SPLIT(dev))
6153
		return;
6154
 
6155
	if (!dev_priv->lvds_downclock_avail)
6156
		return;
6157
 
6158
	dpll = I915_READ(dpll_reg);
6159
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
6160
		DRM_DEBUG_DRIVER("upclocking LVDS\n");
6161
 
6162
		/* Unlock panel regs */
6163
		I915_WRITE(PP_CONTROL,
6164
			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
6165
 
6166
		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
6167
		I915_WRITE(dpll_reg, dpll);
6168
		intel_wait_for_vblank(dev, pipe);
6169
 
6170
		dpll = I915_READ(dpll_reg);
6171
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
6172
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
6173
 
6174
		/* ...and lock them again */
6175
		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
6176
	}
6177
 
2336 Serge 6178
    LEAVE();
6179
 
2327 Serge 6180
	/* Schedule downclock */
6181
//	mod_timer(&intel_crtc->idle_timer, jiffies +
6182
//		  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
6183
}
6184
 
6185
 
6186
 
6187
 
6188
 
6189
 
6190
 
6191
 
6192
 
6193
 
6194
 
6195
 
6196
 
6197
 
6198
 
6199
 
6200
 
6201
 
6202
 
6203
 
6204
 
6205
 
2330 Serge 6206
/* drm_crtc_funcs.destroy: detach and free any pending unpin work, then
 * release the CRTC itself. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_unpin_work *work;
	unsigned long flags;

	/* Steal unpin_work under the event lock so a concurrent flip
	 * completion cannot see a half-freed pointer. */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (work) {
		/* NOTE(port): upstream cancels the workqueue item here;
		 * workqueues are not wired up in this port. */
//		cancel_work_sync(&work->work);
		kfree(work);
	}

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}
2327 Serge 6227
 
6228
 
6229
 
6230
 
6231
 
6232
 
6233
 
6234
 
6235
 
6236
 
6237
 
6238
 
6239
 
6240
 
6241
 
6242
 
6243
 
6244
 
6245
 
6246
 
6247
 
6248
 
6249
 
6250
 
6251
 
6252
 
6253
 
6254
 
6255
 
6256
 
6257
 
6258
 
6259
 
6260
 
6261
 
6262
 
6263
 
6264
 
6265
 
6266
 
6267
 
6268
 
6269
 
6270
 
6271
 
6272
 
6273
 
6274
 
6275
 
6276
 
6277
 
6278
 
6279
 
6280
 
6281
 
6282
 
6283
 
6284
 
6285
 
6286
 
6287
 
6288
 
6289
 
6290
 
6291
 
6292
 
2330 Serge 6293
/* Fix up plane/pipe routing left behind by the BIOS/bootloader on
 * non-PCH hardware: if @plane is enabled but scanning out from the
 * other pipe than @pipe, disable both so our modeset starts clean. */
static void intel_sanitize_modesetting(struct drm_device *dev,
				       int pipe, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg, val;

	if (HAS_PCH_SPLIT(dev))
		return;

	/* Who knows what state these registers were left in by the BIOS or
	 * grub?
	 *
	 * If we leave the registers in a conflicting state (e.g. with the
	 * display plane reading from the other pipe than the one we intend
	 * to use) then when we attempt to teardown the active mode, we will
	 * not disable the pipes and planes in the correct order -- leaving
	 * a plane reading from a disabled pipe and possibly leading to
	 * undefined behaviour.
	 */

	reg = DSPCNTR(plane);
	val = I915_READ(reg);

	if ((val & DISPLAY_PLANE_ENABLE) == 0)
		return;
	/* !! collapses the pipe-select field to 0/1 for comparison. */
	if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
		return;

	/* This display plane is active and attached to the other CPU pipe. */
	pipe = !pipe;

	/* Disable the plane and wait for it to stop reading from the pipe. */
	intel_disable_plane(dev_priv, plane, pipe);
	intel_disable_pipe(dev_priv, pipe);
}
2327 Serge 6328
 
2330 Serge 6329
static void intel_crtc_reset(struct drm_crtc *crtc)
6330
{
6331
	struct drm_device *dev = crtc->dev;
6332
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2327 Serge 6333
 
2330 Serge 6334
	/* Reset flags back to the 'unknown' status so that they
6335
	 * will be correctly set on the initial modeset.
6336
	 */
6337
	intel_crtc->dpms_mode = -1;
2327 Serge 6338
 
2330 Serge 6339
	/* We need to fix up any BIOS configuration that conflicts with
6340
	 * our expectations.
6341
	 */
6342
	intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
6343
}
2327 Serge 6344
 
2330 Serge 6345
/* CRTC helper vtable shared by all pipes.  Deliberately NOT const:
 * intel_crtc_init() patches in the platform-specific .prepare/.commit
 * hooks (Ironlake vs i9xx) at runtime. */
static struct drm_crtc_helper_funcs intel_helper_funcs = {
	.dpms = intel_crtc_dpms,
	.mode_fixup = intel_crtc_mode_fixup,
	.mode_set = intel_crtc_mode_set,
	.mode_set_base = intel_pipe_set_base,
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.load_lut = intel_crtc_load_lut,
	.disable = intel_crtc_disable,
};
2327 Serge 6354
 
2330 Serge 6355
/* Core CRTC vtable.  Cursor and page-flip hooks are disabled in this
 * port (commented out) and left NULL. */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.reset = intel_crtc_reset,
//	.cursor_set = intel_crtc_cursor_set,
//	.cursor_move = intel_crtc_cursor_move,
	.gamma_set = intel_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.destroy = intel_crtc_destroy,
//	.page_flip = intel_crtc_page_flip,
};
2327 Serge 6364
 
2330 Serge 6365
static void intel_crtc_init(struct drm_device *dev, int pipe)
6366
{
6367
	drm_i915_private_t *dev_priv = dev->dev_private;
6368
	struct intel_crtc *intel_crtc;
6369
	int i;
2327 Serge 6370
 
2330 Serge 6371
    ENTER();
2327 Serge 6372
 
2330 Serge 6373
	intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
6374
	if (intel_crtc == NULL)
6375
		return;
2327 Serge 6376
 
2330 Serge 6377
	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
2327 Serge 6378
 
2330 Serge 6379
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
6380
	for (i = 0; i < 256; i++) {
6381
		intel_crtc->lut_r[i] = i;
6382
		intel_crtc->lut_g[i] = i;
6383
		intel_crtc->lut_b[i] = i;
6384
	}
2327 Serge 6385
 
2330 Serge 6386
	/* Swap pipes & planes for FBC on pre-965 */
6387
	intel_crtc->pipe = pipe;
6388
	intel_crtc->plane = pipe;
6389
	if (IS_MOBILE(dev) && IS_GEN3(dev)) {
6390
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
6391
		intel_crtc->plane = !pipe;
6392
	}
2327 Serge 6393
 
2330 Serge 6394
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
6395
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
6396
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
6397
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
2327 Serge 6398
 
2330 Serge 6399
	intel_crtc_reset(&intel_crtc->base);
6400
	intel_crtc->active = true; /* force the pipe off on setup_init_config */
6401
	intel_crtc->bpp = 24; /* default for pre-Ironlake */
2327 Serge 6402
 
2330 Serge 6403
	if (HAS_PCH_SPLIT(dev)) {
6404
		intel_helper_funcs.prepare = ironlake_crtc_prepare;
6405
		intel_helper_funcs.commit = ironlake_crtc_commit;
6406
	} else {
6407
		intel_helper_funcs.prepare = i9xx_crtc_prepare;
6408
		intel_helper_funcs.commit = i9xx_crtc_commit;
6409
	}
2327 Serge 6410
 
2330 Serge 6411
	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
2327 Serge 6412
 
2330 Serge 6413
	intel_crtc->busy = false;
2327 Serge 6414
 
2330 Serge 6415
    LEAVE();
2327 Serge 6416
 
2330 Serge 6417
//	setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
6418
//		    (unsigned long)intel_crtc);
6419
}
2327 Serge 6420
 
6421
 
6422
 
6423
 
6424
 
6425
 
6426
 
2330 Serge 6427
static int intel_encoder_clones(struct drm_device *dev, int type_mask)
6428
{
6429
	struct intel_encoder *encoder;
6430
	int index_mask = 0;
6431
	int entry = 0;
2327 Serge 6432
 
2330 Serge 6433
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
6434
		if (type_mask & encoder->clone_mask)
6435
			index_mask |= (1 << entry);
6436
		entry++;
6437
	}
2327 Serge 6438
 
2330 Serge 6439
	return index_mask;
6440
}
2327 Serge 6441
 
2330 Serge 6442
static bool has_edp_a(struct drm_device *dev)
6443
{
6444
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 6445
 
2330 Serge 6446
	if (!IS_MOBILE(dev))
6447
		return false;
2327 Serge 6448
 
2330 Serge 6449
	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
6450
		return false;
2327 Serge 6451
 
2330 Serge 6452
	if (IS_GEN5(dev) &&
6453
	    (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
6454
		return false;
2327 Serge 6455
 
2330 Serge 6456
	return true;
6457
}
2327 Serge 6458
 
2330 Serge 6459
/* Probe the hardware and register every output (LVDS/eDP, CRT, SDVO,
 * HDMI, DP, DVO) present on this platform, then fill in each encoder's
 * possible_crtcs/possible_clones masks.  The probe order matters:
 * several ports multiplex (e.g. PCH SDVOB with HDMIB). */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;
	bool has_lvds = false;

    ENTER();

	if (IS_MOBILE(dev) && !IS_I830(dev))
		has_lvds = intel_lvds_init(dev);
	if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
		/* disable the panel fitter on everything but LVDS */
		I915_WRITE(PFIT_CONTROL, 0);
	}

	if (HAS_PCH_SPLIT(dev)) {
		/* Port D may carry eDP instead of regular DP (per VBT). */
		dpd_is_edp = intel_dpd_is_edp(dev);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A);

		if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
			intel_dp_init(dev, PCH_DP_D);
	}

	intel_crt_init(dev);

	if (HAS_PCH_SPLIT(dev)) {
		int found;

		if (I915_READ(HDMIB) & PORT_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB);
			if (!found)
				intel_hdmi_init(dev, HDMIB);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B);
		}

		if (I915_READ(HDMIC) & PORT_DETECTED)
			intel_hdmi_init(dev, HDMIC);

		if (I915_READ(HDMID) & PORT_DETECTED)
			intel_hdmi_init(dev, HDMID);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C);

		if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
			intel_dp_init(dev, PCH_DP_D);

	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
		bool found = false;

		if (I915_READ(SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, SDVOB);
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, SDVOB);
			}

			if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
				DRM_DEBUG_KMS("probing DP_B\n");
				intel_dp_init(dev, DP_B);
			}
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, SDVOC);
		}

		if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {

			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, SDVOC);
			}
			if (SUPPORTS_INTEGRATED_DP(dev)) {
				DRM_DEBUG_KMS("probing DP_C\n");
				intel_dp_init(dev, DP_C);
			}
		}

		if (SUPPORTS_INTEGRATED_DP(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED)) {
			DRM_DEBUG_KMS("probing DP_D\n");
			intel_dp_init(dev, DP_D);
		}
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);

	/* NOTE(port): TV-out support is not wired up in this port. */
//   if (SUPPORTS_TV(dev))
//       intel_tv_init(dev);

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(dev, encoder->clone_mask);
	}

	/* disable all the possible outputs/crtcs before entering KMS mode */
//	drm_helper_disable_unused_functions(dev);

    LEAVE();
}
6569
 
6570
 
6571
 
6572
 
2327 Serge 6573
/* Mode-config vtable.  Userspace framebuffer creation and output-poll
 * notification are not wired up in this port, hence the NULL hooks. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = NULL /*intel_user_framebuffer_create*/,
	.output_poll_changed = NULL /*intel_fb_output_poll_changed*/,
};
6577
 
6578
 
6579
 
6580
 
6581
 
6582
 
6583
 
6584
 
6585
 
6586
 
6587
 
6588
 
6589
 
2335 Serge 6590
/* Framebuffer vtable passed to drm_framebuffer_init().  destroy and
 * create_handle are not wired up in this port (left NULL). */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
//	.destroy = intel_user_framebuffer_destroy,
//	.create_handle = intel_user_framebuffer_create_handle,
};
2327 Serge 6594
 
2335 Serge 6595
int intel_framebuffer_init(struct drm_device *dev,
6596
			   struct intel_framebuffer *intel_fb,
6597
			   struct drm_mode_fb_cmd *mode_cmd,
6598
			   struct drm_i915_gem_object *obj)
6599
{
6600
	int ret;
2327 Serge 6601
 
2335 Serge 6602
	if (obj->tiling_mode == I915_TILING_Y)
6603
		return -EINVAL;
2327 Serge 6604
 
2335 Serge 6605
	if (mode_cmd->pitch & 63)
6606
		return -EINVAL;
2327 Serge 6607
 
2335 Serge 6608
	switch (mode_cmd->bpp) {
6609
	case 8:
6610
	case 16:
6611
		/* Only pre-ILK can handle 5:5:5 */
6612
		if (mode_cmd->depth == 15 && !HAS_PCH_SPLIT(dev))
6613
			return -EINVAL;
6614
		break;
2327 Serge 6615
 
2335 Serge 6616
	case 24:
6617
	case 32:
6618
		break;
6619
	default:
6620
		return -EINVAL;
6621
	}
2327 Serge 6622
 
2335 Serge 6623
	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
6624
	if (ret) {
6625
		DRM_ERROR("framebuffer init failed %d\n", ret);
6626
		return ret;
6627
	}
2327 Serge 6628
 
2335 Serge 6629
	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
6630
	intel_fb->obj = obj;
6631
	return 0;
6632
}
2327 Serge 6633
 
6634
 
6635
 
6636
 
6637
 
6638
 
6639
 
6640
 
6641
 
6642
 
6643
 
6644
 
2330 Serge 6645
/* Request a new render P-state @val via the MEMSWCTL mailbox.
 * Returns false without doing anything if a previous command is still
 * pending; returns true once the new request has been issued. */
bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	/* A set status bit means the previous command hasn't completed. */
	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	/* Issue the frequency-change command; the posting read flushes
	 * the write before the status bit is set below. */
	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}
2327 Serge 6666
 
2330 Serge 6667
/*
 * ironlake_enable_drps - enable software-controlled render P-state
 * switching (DRPS) on Ironlake.
 *
 * Reads the fused min/max/start frequency points out of MEMMODECTL,
 * programs the evaluation intervals and thresholds the hardware uses to
 * raise frequency-change events, switches MEMMODECTL into software mode
 * and jumps to the start frequency.  Finally snapshots the energy
 * counters used later for power accounting (the timestamp snapshots are
 * stubbed out in this port).
 */
void ironlake_enable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL);
	u8 fmax, fmin, fstart, vstart;

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	/* Voltage for the start point, from the per-point fuse table */
	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->fmax = fmax; /* IPS callback will increase this */
	dev_priv->fstart = fstart;

	dev_priv->max_delay = fstart;
	dev_priv->min_delay = fmin;
	dev_priv->cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	/* Hand frequency control over to software. */
	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	msleep(1);

	ironlake_set_drps(dev, fstart);

	/* Snapshot the energy/count registers for later delta accounting.
	 * The timestamp captures are not ported yet. */
	dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
		I915_READ(0x112e0);
//   dev_priv->last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->last_count2 = I915_READ(0x112f4);
//   getrawmonotonic(&dev_priv->last_time2);
}
2327 Serge 6730
 
6731
 
6732
 
6733
 
6734
 
6735
 
6736
 
6737
 
6738
 
6739
 
6740
 
6741
 
6742
 
6743
 
6744
 
2330 Serge 6745
/*
 * intel_pxfreq - decode a PXVFREQ register value into a frequency (kHz).
 *
 * The register packs a divider (bits 21:16), a post-divider exponent
 * (bits 13:12) and a pre-divider (bits 2:0) applied to a 133.333 MHz
 * reference clock.  Returns 0 when the pre-divider field is zero
 * (unprogrammed entry), avoiding a divide by zero.
 */
static unsigned long intel_pxfreq(u32 vidfreq)
{
	unsigned long freq;
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	/* Reconstructed: this line was truncated in this copy of the file;
	 * the divisor is (1 << post) * pre, as in the upstream driver. */
	freq = ((div * 133333) / ((1 << post) * pre));

	return freq;
}
2327 Serge 6759
 
2330 Serge 6760
/*
 * intel_init_emon - program the energy monitoring (EMON) unit.
 *
 * Loads the event energy weight tables, derives per-P-state weights from
 * the fused voltage/frequency table (via intel_pxfreq), writes a set of
 * experimentally-determined magic register values and finally enables the
 * power monitor.  The fused correction factor is cached in dev_priv->corr.
 */
void intel_init_emon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW + (i * 4), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW + (i * 4), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		/* Weight scales with voltage squared times frequency. */
		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	/* Pack the 16 byte-wide weights into four 32-bit registers. */
	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW + (i * 4), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL + (i * 4), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
6830
 
6831
/*
 * gen6_enable_rps - enable RC6 render power states and RPS turbo /
 * auto-downclocking on Sandybridge-class hardware.
 *
 * Programs the RC wake-rate limits and idle thresholds, enables RC6 (when
 * the i915_enable_rc6 module option allows it), sets up the RP frequency
 * request/thresholds, negotiates the min-frequency table with the PCU via
 * the GEN6_PCODE mailbox, probes for overclock support and finally
 * unmasks the PM interrupts.  The rps_lock handling from the upstream
 * driver is stubbed out in this port.
 */
void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
	u32 pcu_mbox, rc6_mask = 0;
	int cur_freq, min_freq, max_freq;
	int i;

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	I915_WRITE(GEN6_RC_STATE, 0);
	mutex_lock(&dev_priv->dev->struct_mutex);
	gen6_gt_force_wake_get(dev_priv);

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	/* Per-ring idle timeouts before RC entry is considered. */
	for (i = 0; i < I915_NUM_RINGS; i++)
		I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	if (i915_enable_rc6)
		rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
			GEN6_RC_CTL_RC6_ENABLE;

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	I915_WRITE(GEN6_RPNSWREQ,
		   GEN6_FREQUENCY(10) |
		   GEN6_OFFSET(0) |
		   GEN6_AGGRESSIVE_TURBO);
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   GEN6_FREQUENCY(12));

	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   18 << 24 |
		   6 << 16);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
	I915_WRITE(GEN6_RP_UP_EI, 100000);
	I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_USE_NORMAL_FREQ |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	/* Push the min-frequency table to the PCU through the mailbox. */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");

	I915_WRITE(GEN6_PCODE_DATA, 0);
	I915_WRITE(GEN6_PCODE_MAILBOX,
		   GEN6_PCODE_READY |
		   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");

	min_freq = (rp_state_cap & 0xff0000) >> 16;
	max_freq = rp_state_cap & 0xff;
	cur_freq = (gt_perf_status & 0xff00) >> 8;

	/* Check for overclock support */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
	pcu_mbox = I915_READ(GEN6_PCODE_DATA);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
	if (pcu_mbox & (1<<31)) { /* OC supported */
		max_freq = pcu_mbox & 0xff;
		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
	}

	/* In units of 100MHz */
	dev_priv->max_delay = max_freq;
	dev_priv->min_delay = min_freq;
	dev_priv->cur_delay = cur_freq;

	/* requires MSI enabled */
	I915_WRITE(GEN6_PMIER,
		   GEN6_PM_MBOX_EVENT |
		   GEN6_PM_THERMAL_EVENT |
		   GEN6_PM_RP_DOWN_TIMEOUT |
		   GEN6_PM_RP_UP_THRESHOLD |
		   GEN6_PM_RP_DOWN_THRESHOLD |
		   GEN6_PM_RP_UP_EI_EXPIRED |
		   GEN6_PM_RP_DOWN_EI_EXPIRED);
//   spin_lock_irq(&dev_priv->rps_lock);
//   WARN_ON(dev_priv->pm_iir != 0);
	I915_WRITE(GEN6_PMIMR, 0);
//   spin_unlock_irq(&dev_priv->rps_lock);
	/* enable all PM interrupts */
	I915_WRITE(GEN6_PMINTRMSK, 0);

	gen6_gt_force_wake_put(dev_priv);
	mutex_unlock(&dev_priv->dev->struct_mutex);
}
6954
 
6955
/*
 * gen6_update_ring_freq - program the GPU-to-ring frequency table.
 *
 * For every supported GPU frequency this writes, via the GEN6_PCODE
 * mailbox, the IA frequency the PCU should use as a reference when
 * picking the ring (uncore) clock.  The CPU max frequency query is not
 * ported, so a fixed 3 GHz value is assumed — TODO: confirm once
 * cpufreq/tsc support is available.
 */
void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
	int min_freq = 15;
	int gpu_freq, ia_freq, max_ia_freq;
	int scaling_factor = 180;

//   max_ia_freq = cpufreq_quick_get_max(0);
	/*
	 * Default to measured freq if none found, PCU will ensure we don't go
	 * over
	 */
//   if (!max_ia_freq)
		max_ia_freq = 3000000; //tsc_khz;

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	mutex_lock(&dev_priv->dev->struct_mutex);

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access.  We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
	     gpu_freq--) {
		int diff = dev_priv->max_delay - gpu_freq;

		/*
		 * For GPU frequencies less than 750MHz, just use the lowest
		 * ring freq.
		 */
		if (gpu_freq < min_freq)
			ia_freq = 800;
		else
			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
		ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);

		I915_WRITE(GEN6_PCODE_DATA,
			   (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
			   gpu_freq);
		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
			   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
			      GEN6_PCODE_READY) == 0, 10)) {
			DRM_ERROR("pcode write of freq table timed out\n");
			continue;
		}
	}

	mutex_unlock(&dev_priv->dev->struct_mutex);
}
7007
 
2327 Serge 7008
/*
 * ironlake_init_clock_gating - apply Ironlake clock-gating workarounds.
 *
 * Disables the clock-gating units that conflict with FBC and CxSR, sets
 * the chicken bits required for memory self-refresh, clears the LP
 * watermarks, and (on mobile parts) sets the additional bits needed for
 * FBC per the hardware documentation.
 */
static void ironlake_init_clock_gating(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

    /* Required for FBC */
    dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
        DPFCRUNIT_CLOCK_GATE_DISABLE |
        DPFDUNIT_CLOCK_GATE_DISABLE;
    /* Required for CxSR */
    dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;

    I915_WRITE(PCH_3DCGDIS0,
           MARIUNIT_CLOCK_GATE_DISABLE |
           SVSMUNIT_CLOCK_GATE_DISABLE);
    I915_WRITE(PCH_3DCGDIS1,
           VFMUNIT_CLOCK_GATE_DISABLE);

    I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

    /*
     * According to the spec the following bits should be set in
     * order to enable memory self-refresh
     * The bit 22/21 of 0x42004
     * The bit 5 of 0x42020
     * The bit 15 of 0x45000
     */
    I915_WRITE(ILK_DISPLAY_CHICKEN2,
           (I915_READ(ILK_DISPLAY_CHICKEN2) |
            ILK_DPARB_GATE | ILK_VSDPFD_FULL));
    I915_WRITE(ILK_DSPCLK_GATE,
           (I915_READ(ILK_DSPCLK_GATE) |
            ILK_DPARB_CLK_GATE));
    I915_WRITE(DISP_ARB_CTL,
           (I915_READ(DISP_ARB_CTL) |
            DISP_FBC_WM_DIS));
    /* Start with the LP watermarks cleared. */
    I915_WRITE(WM3_LP_ILK, 0);
    I915_WRITE(WM2_LP_ILK, 0);
    I915_WRITE(WM1_LP_ILK, 0);

    /*
     * Based on the document from hardware guys the following bits
     * should be set unconditionally in order to enable FBC.
     * The bit 22 of 0x42000
     * The bit 22 of 0x42004
     * The bit 7,8,9 of 0x42020.
     */
    if (IS_IRONLAKE_M(dev)) {
        I915_WRITE(ILK_DISPLAY_CHICKEN1,
               I915_READ(ILK_DISPLAY_CHICKEN1) |
               ILK_FBCQ_DIS);
        I915_WRITE(ILK_DISPLAY_CHICKEN2,
               I915_READ(ILK_DISPLAY_CHICKEN2) |
               ILK_DPARB_GATE);
        I915_WRITE(ILK_DSPCLK_GATE,
               I915_READ(ILK_DSPCLK_GATE) |
               ILK_DPFC_DIS1 |
               ILK_DPFC_DIS2 |
               ILK_CLK_FBC);
    }

    I915_WRITE(ILK_DISPLAY_CHICKEN2,
           I915_READ(ILK_DISPLAY_CHICKEN2) |
           ILK_ELPIN_409_SELECT);
    I915_WRITE(_3D_CHICKEN2,
           _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
           _3D_CHICKEN2_WM_READ_PIPELINED);
}
7076
 
7077
/*
 * gen6_init_clock_gating - apply Sandybridge clock-gating workarounds.
 *
 * Sets the chicken bits needed for memory self-refresh and FBC, clears
 * the LP watermarks, and disables display plane trickle feed on every
 * pipe.
 */
static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* Start with the LP watermarks cleared. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE,
		   I915_READ(ILK_DSPCLK_GATE) |
		   ILK_DPARB_CLK_GATE  |
		   ILK_DPFD_CLK_GATE);

	/* Disable trickle feed on every pipe (bit14 above). */
	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}
7120
 
7121
/*
 * ivybridge_init_clock_gating - apply Ivybridge clock-gating workarounds:
 * disable VRHUNIT gating, clear the LP watermarks, and disable display
 * plane trickle feed on every pipe.
 */
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}
7142
 
7143
/*
 * g4x_init_clock_gating - configure render/display clock gating on G4x;
 * GM45 additionally needs the DSS unit gating disabled.
 */
static void g4x_init_clock_gating(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    uint32_t dspclk_gate;

    I915_WRITE(RENCLK_GATE_D1, 0);
    I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
           GS_UNIT_CLOCK_GATE_DISABLE |
           CL_UNIT_CLOCK_GATE_DISABLE);
    I915_WRITE(RAMCLK_GATE_D, 0);
    dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
        OVRUNIT_CLOCK_GATE_DISABLE |
        OVCUNIT_CLOCK_GATE_DISABLE;
    if (IS_GM45(dev))
        dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
    I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
}
7160
 
7161
/* crestline_init_clock_gating - Crestline (965GM) clock gating setup:
 * only RCC gating is disabled, everything else is left fully gated. */
static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
}
7171
 
7172
/* broadwater_init_clock_gating - Broadwater (965G) clock gating setup:
 * disable gating for several render-side units. */
static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
}
7183
 
7184
/* gen3_init_clock_gating - enable PLL D3, GFX and dot clock gating via
 * the D_STATE register on gen3 parts. */
static void gen3_init_clock_gating(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    u32 dstate = I915_READ(D_STATE);

    dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
        DSTATE_DOT_CLOCK_GATING;
    I915_WRITE(D_STATE, dstate);
}
7193
 
7194
/* i85x_init_clock_gating - disable SV unit clock gating on 85x. */
static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
}
7200
 
7201
/* i830_init_clock_gating - disable overlay unit clock gating on 830. */
static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
7207
 
7208
/* ibx_init_clock_gating - PCH (Ibex Peak) clock gating workaround. */
static void ibx_init_clock_gating(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    /*
     * On Ibex Peak and Cougar Point, we need to disable clock
     * gating for the panel power sequencer or it will fail to
     * start up when no ports are active.
     */
    I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
7219
 
7220
/* cpt_init_clock_gating - PCH (Cougar Point) clock gating workarounds. */
static void cpt_init_clock_gating(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    int pipe;

    /*
     * On Ibex Peak and Cougar Point, we need to disable clock
     * gating for the panel power sequencer or it will fail to
     * start up when no ports are active.
     */
    I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
    I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
           DPLS_EDP_PPS_FIX_DIS);
    /* Without this, mode sets may fail silently on FDI */
    for_each_pipe(pipe)
        I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
}
7237
 
2332 Serge 7238
/*
 * ironlake_teardown_rc6 - drop the render and power context pages used
 * for RC6.  The unpin/unreference calls are stubbed out in this port, so
 * only the pointers are cleared.
 */
static void ironlake_teardown_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->renderctx) {
//		i915_gem_object_unpin(dev_priv->renderctx);
//		drm_gem_object_unreference(&dev_priv->renderctx->base);
		dev_priv->renderctx = NULL;
	}

	if (dev_priv->pwrctx) {
//		i915_gem_object_unpin(dev_priv->pwrctx);
//		drm_gem_object_unreference(&dev_priv->pwrctx->base);
		dev_priv->pwrctx = NULL;
	}
}
2327 Serge 7254
 
2330 Serge 7255
 
2332 Serge 7256
 
7257
 
7258
 
7259
 
7260
 
7261
static int ironlake_setup_rc6(struct drm_device *dev)
7262
{
7263
	struct drm_i915_private *dev_priv = dev->dev_private;
7264
 
7265
	if (dev_priv->renderctx == NULL)
7266
//		dev_priv->renderctx = intel_alloc_context_page(dev);
7267
	if (!dev_priv->renderctx)
7268
		return -ENOMEM;
7269
 
7270
	if (dev_priv->pwrctx == NULL)
7271
//		dev_priv->pwrctx = intel_alloc_context_page(dev);
7272
	if (!dev_priv->pwrctx) {
7273
		ironlake_teardown_rc6(dev);
7274
		return -ENOMEM;
7275
	}
7276
 
7277
	return 0;
7278
}
7279
 
7280
/*
 * ironlake_enable_rc6 - enable render-unit power-down (RC6) on Ironlake.
 *
 * Sets up the save/restore context pages, then points PWRCTXA at the
 * power context and clears RCX_SW_EXIT so the hardware may enter RC6.
 * The ring-command sequence that loads the render context is not ported
 * yet (see the #if 0 region).
 */
void ironlake_enable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* rc6 disabled by default due to repeated reports of hanging during
	 * boot and resume.
	 */
	if (!i915_enable_rc6)
		return;

	mutex_lock(&dev->struct_mutex);
	ret = ironlake_setup_rc6(dev);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return;
	}

	/*
	 * GPU can automatically power down the render unit if given a page
	 * to save state.
	 */
#if 0
	ret = BEGIN_LP_RING(6);
	if (ret) {
		ironlake_teardown_rc6(dev);
		mutex_unlock(&dev->struct_mutex);
		return;
	}

	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
	OUT_RING(MI_SET_CONTEXT);
	OUT_RING(dev_priv->renderctx->gtt_offset |
		 MI_MM_SPACE_GTT |
		 MI_SAVE_EXT_STATE_EN |
		 MI_RESTORE_EXT_STATE_EN |
		 MI_RESTORE_INHIBIT);
	OUT_RING(MI_SUSPEND_FLUSH);
	OUT_RING(MI_NOOP);
	OUT_RING(MI_FLUSH);
	ADVANCE_LP_RING();

	/*
	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
	 * does an implicit flush, combined with MI_FLUSH above, it should be
	 * safe to assume that renderctx is valid
	 */
	ret = intel_wait_ring_idle(LP_RING(dev_priv));
	if (ret) {
		DRM_ERROR("failed to enable ironlake power power savings\n");
		ironlake_teardown_rc6(dev);
		mutex_unlock(&dev->struct_mutex);
		return;
	}
#endif

	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
	mutex_unlock(&dev->struct_mutex);
}
7340
 
2330 Serge 7341
void intel_init_clock_gating(struct drm_device *dev)
7342
{
7343
	struct drm_i915_private *dev_priv = dev->dev_private;
7344
 
7345
	dev_priv->display.init_clock_gating(dev);
7346
 
7347
	if (dev_priv->display.init_pch_clock_gating)
7348
		dev_priv->display.init_pch_clock_gating(dev);
7349
}
7350
 
2327 Serge 7351
/*
 * intel_init_display - install the chip-specific display function table.
 *
 * Fills dev_priv->display with the hooks appropriate for this device:
 * DPMS/mode-set/plane updates, FBC enable/disable, core display clock
 * query, FDI link training, watermark updates and clock-gating init.
 * Page-flip hooks are not wired up in this port (see the #if 0 region).
 */
static void intel_init_display(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    /* We always want a DPMS function */
    if (HAS_PCH_SPLIT(dev)) {
        dev_priv->display.dpms = ironlake_crtc_dpms;
        dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
        dev_priv->display.update_plane = ironlake_update_plane;
    } else {
        dev_priv->display.dpms = i9xx_crtc_dpms;
        dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
        dev_priv->display.update_plane = i9xx_update_plane;
    }

    /* Frame buffer compression hooks, where the hardware supports it. */
    if (I915_HAS_FBC(dev)) {
        if (HAS_PCH_SPLIT(dev)) {
            dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
            dev_priv->display.enable_fbc = ironlake_enable_fbc;
            dev_priv->display.disable_fbc = ironlake_disable_fbc;
        } else if (IS_GM45(dev)) {
            dev_priv->display.fbc_enabled = g4x_fbc_enabled;
            dev_priv->display.enable_fbc = g4x_enable_fbc;
            dev_priv->display.disable_fbc = g4x_disable_fbc;
        } else if (IS_CRESTLINE(dev)) {
            dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
            dev_priv->display.enable_fbc = i8xx_enable_fbc;
            dev_priv->display.disable_fbc = i8xx_disable_fbc;
        }
        /* 855GM needs testing */
    }

    /* Returns the core display clock speed */
    if (IS_I945G(dev) || (IS_G33(dev) && ! IS_PINEVIEW_M(dev)))
        dev_priv->display.get_display_clock_speed =
            i945_get_display_clock_speed;
    else if (IS_I915G(dev))
        dev_priv->display.get_display_clock_speed =
            i915_get_display_clock_speed;
    else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
        dev_priv->display.get_display_clock_speed =
            i9xx_misc_get_display_clock_speed;
    else if (IS_I915GM(dev))
        dev_priv->display.get_display_clock_speed =
            i915gm_get_display_clock_speed;
    else if (IS_I865G(dev))
        dev_priv->display.get_display_clock_speed =
            i865_get_display_clock_speed;
    else if (IS_I85X(dev))
        dev_priv->display.get_display_clock_speed =
            i855_get_display_clock_speed;
    else /* 852, 830 */
        dev_priv->display.get_display_clock_speed =
            i830_get_display_clock_speed;

    /* For FIFO watermark updates */
    if (HAS_PCH_SPLIT(dev)) {
        if (HAS_PCH_IBX(dev))
            dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
        else if (HAS_PCH_CPT(dev))
            dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;

        if (IS_GEN5(dev)) {
            if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
                dev_priv->display.update_wm = ironlake_update_wm;
            else {
                DRM_DEBUG_KMS("Failed to get proper latency. "
                          "Disable CxSR\n");
                dev_priv->display.update_wm = NULL;
            }
            dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
            dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
        } else if (IS_GEN6(dev)) {
            if (SNB_READ_WM0_LATENCY()) {
                dev_priv->display.update_wm = sandybridge_update_wm;
            } else {
                DRM_DEBUG_KMS("Failed to read display plane latency. "
                          "Disable CxSR\n");
                dev_priv->display.update_wm = NULL;
            }
            dev_priv->display.fdi_link_train = gen6_fdi_link_train;
            dev_priv->display.init_clock_gating = gen6_init_clock_gating;
        } else if (IS_IVYBRIDGE(dev)) {
            /* FIXME: detect B0+ stepping and use auto training */
            dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
            if (SNB_READ_WM0_LATENCY()) {
                dev_priv->display.update_wm = sandybridge_update_wm;
            } else {
                DRM_DEBUG_KMS("Failed to read display plane latency. "
                          "Disable CxSR\n");
                dev_priv->display.update_wm = NULL;
            }
            dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;

        } else
            dev_priv->display.update_wm = NULL;
    } else if (IS_PINEVIEW(dev)) {
        if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
                        dev_priv->is_ddr3,
                        dev_priv->fsb_freq,
                        dev_priv->mem_freq)) {
            DRM_INFO("failed to find known CxSR latency "
                 "(found ddr%s fsb freq %d, mem freq %d), "
                 "disabling CxSR\n",
                 (dev_priv->is_ddr3 == 1) ? "3": "2",
                 dev_priv->fsb_freq, dev_priv->mem_freq);
            /* Disable CxSR and never update its watermark again */
            pineview_disable_cxsr(dev);
            dev_priv->display.update_wm = NULL;
        } else
            dev_priv->display.update_wm = pineview_update_wm;
        dev_priv->display.init_clock_gating = gen3_init_clock_gating;
    } else if (IS_G4X(dev)) {
        dev_priv->display.update_wm = g4x_update_wm;
        dev_priv->display.init_clock_gating = g4x_init_clock_gating;
    } else if (IS_GEN4(dev)) {
        dev_priv->display.update_wm = i965_update_wm;
        if (IS_CRESTLINE(dev))
            dev_priv->display.init_clock_gating = crestline_init_clock_gating;
        else if (IS_BROADWATER(dev))
            dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
    } else if (IS_GEN3(dev)) {
        dev_priv->display.update_wm = i9xx_update_wm;
        dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
        dev_priv->display.init_clock_gating = gen3_init_clock_gating;
    } else if (IS_I865G(dev)) {
        dev_priv->display.update_wm = i830_update_wm;
        dev_priv->display.init_clock_gating = i85x_init_clock_gating;
        dev_priv->display.get_fifo_size = i830_get_fifo_size;
    } else if (IS_I85X(dev)) {
        dev_priv->display.update_wm = i9xx_update_wm;
        dev_priv->display.get_fifo_size = i85x_get_fifo_size;
        dev_priv->display.init_clock_gating = i85x_init_clock_gating;
    } else {
        dev_priv->display.update_wm = i830_update_wm;
        dev_priv->display.init_clock_gating = i830_init_clock_gating;
        if (IS_845G(dev))
            dev_priv->display.get_fifo_size = i845_get_fifo_size;
        else
            dev_priv->display.get_fifo_size = i830_get_fifo_size;
    }

    /* Default just returns -ENODEV to indicate unsupported */
//    dev_priv->display.queue_flip = intel_default_queue_flip;

#if 0
    switch (INTEL_INFO(dev)->gen) {
    case 2:
        dev_priv->display.queue_flip = intel_gen2_queue_flip;
        break;

    case 3:
        dev_priv->display.queue_flip = intel_gen3_queue_flip;
        break;

    case 4:
    case 5:
        dev_priv->display.queue_flip = intel_gen4_queue_flip;
        break;

    case 6:
        dev_priv->display.queue_flip = intel_gen6_queue_flip;
        break;
    case 7:
        dev_priv->display.queue_flip = intel_gen7_queue_flip;
        break;
    }
#endif
}
7521
 
7522
/*
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
 * resume, or other times.  This quirk makes sure that's the case for
 * affected systems.
 */
static void quirk_pipea_force (struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    /* Checked by the mode-set code to keep pipe A (and DPLL A) running. */
    dev_priv->quirks |= QUIRK_PIPEA_FORCE;
    DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
}
7534
 
7535
/*
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
 */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    /* Forces spread-spectrum clocking off regardless of VBT settings. */
    dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
}
7543
 
7544
/*
 * Table entry matching a PCI device (with optional subsystem IDs) to a
 * workaround hook applied once at driver init.
 */
struct intel_quirk {
    int device;              /* PCI device ID */
    int subsystem_vendor;    /* PCI subsystem vendor ID, or PCI_ANY_ID */
    int subsystem_device;    /* PCI subsystem device ID, or PCI_ANY_ID */
    void (*hook)(struct drm_device *dev);  /* quirk to apply on match */
};
7550
 
7551
struct intel_quirk intel_quirks[] = {
7552
    /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
7553
    { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
7554
    /* HP Mini needs pipe A force quirk (LP: #322104) */
7555
    { 0x27ae,0x103c, 0x361a, quirk_pipea_force },
7556
 
7557
    /* Thinkpad R31 needs pipe A force quirk */
7558
    { 0x3577, 0x1014, 0x0505, quirk_pipea_force },
7559
    /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
7560
    { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
7561
 
7562
    /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
7563
    { 0x3577,  0x1014, 0x0513, quirk_pipea_force },
7564
    /* ThinkPad X40 needs pipe A force quirk */
7565
 
7566
    /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
7567
    { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
7568
 
7569
    /* 855 & before need to leave pipe A & dpll A up */
7570
    { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
7571
    { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
7572
 
7573
    /* Lenovo U160 cannot use SSC on LVDS */
7574
    { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
7575
 
7576
    /* Sony Vaio Y cannot use SSC on LVDS */
7577
    { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
7578
};
7579
 
7580
static void intel_init_quirks(struct drm_device *dev)
7581
{
7582
    struct pci_dev *d = dev->pdev;
7583
    int i;
7584
 
7585
    for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
7586
        struct intel_quirk *q = &intel_quirks[i];
7587
 
7588
        if (d->device == q->device &&
7589
            (d->subsystem_vendor == q->subsystem_vendor ||
7590
             q->subsystem_vendor == PCI_ANY_ID) &&
7591
            (d->subsystem_device == q->subsystem_device ||
7592
             q->subsystem_device == PCI_ANY_ID))
7593
            q->hook(dev);
7594
    }
7595
}
7596
 
2330 Serge 7597
/* Disable the VGA plane that we never use */
7598
static void i915_disable_vga(struct drm_device *dev)
7599
{
7600
	struct drm_i915_private *dev_priv = dev->dev_private;
7601
	u8 sr1;
7602
	u32 vga_reg;
2327 Serge 7603
 
2330 Serge 7604
	if (HAS_PCH_SPLIT(dev))
7605
		vga_reg = CPU_VGACNTRL;
7606
	else
7607
		vga_reg = VGACNTRL;
7608
 
7609
//	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
7610
    out8(VGA_SR_INDEX, 1);
7611
    sr1 = in8(VGA_SR_DATA);
7612
    out8(VGA_SR_DATA,sr1 | 1<<5);
7613
//   vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
7614
	udelay(300);
7615
 
7616
	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
7617
	POSTING_READ(vga_reg);
7618
}
7619
 
2327 Serge 7620
void intel_modeset_init(struct drm_device *dev)
7621
{
7622
    struct drm_i915_private *dev_priv = dev->dev_private;
7623
    int i;
7624
 
7625
    drm_mode_config_init(dev);
7626
 
7627
    dev->mode_config.min_width = 0;
7628
    dev->mode_config.min_height = 0;
7629
 
7630
    dev->mode_config.funcs = (void *)&intel_mode_funcs;
7631
 
7632
    intel_init_quirks(dev);
7633
 
7634
    intel_init_display(dev);
7635
 
7636
    if (IS_GEN2(dev)) {
7637
        dev->mode_config.max_width = 2048;
7638
        dev->mode_config.max_height = 2048;
7639
    } else if (IS_GEN3(dev)) {
7640
        dev->mode_config.max_width = 4096;
7641
        dev->mode_config.max_height = 4096;
7642
    } else {
7643
        dev->mode_config.max_width = 8192;
7644
        dev->mode_config.max_height = 8192;
7645
    }
7646
    dev->mode_config.fb_base = get_bus_addr();
7647
 
7648
    DRM_DEBUG_KMS("%d display pipe%s available.\n",
7649
              dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
7650
 
7651
    for (i = 0; i < dev_priv->num_pipe; i++) {
7652
        intel_crtc_init(dev, i);
7653
    }
7654
 
7655
    /* Just disable it once at startup */
7656
    i915_disable_vga(dev);
7657
    intel_setup_outputs(dev);
7658
 
7659
    intel_init_clock_gating(dev);
7660
 
7661
    if (IS_IRONLAKE_M(dev)) {
7662
        ironlake_enable_drps(dev);
7663
        intel_init_emon(dev);
7664
    }
7665
 
7666
    if (IS_GEN6(dev) || IS_GEN7(dev)) {
7667
        gen6_enable_rps(dev_priv);
7668
        gen6_update_ring_freq(dev_priv);
7669
    }
7670
 
2332 Serge 7671
//   INIT_WORK(&dev_priv->idle_work, intel_idle_update);
7672
//   setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
7673
//           (unsigned long)dev);
2330 Serge 7674
}
2327 Serge 7675
 
2332 Serge 7676
/*
 * GEM-dependent part of modeset init.  Currently only enables RC6 on
 * Ironlake mobile parts; overlay setup is disabled in this port.
 */
void intel_modeset_gem_init(struct drm_device *dev)
{
	if (IS_IRONLAKE_M(dev))
		ironlake_enable_rc6(dev);

//	intel_setup_overlay(dev);
}
7683
 
7684
 
2330 Serge 7685
/*
7686
 * Return which encoder is currently attached for connector.
7687
 */
7688
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
7689
{
7690
	return &intel_attached_encoder(connector)->base;
2327 Serge 7691
}
7692
 
2330 Serge 7693
void intel_connector_attach_encoder(struct intel_connector *connector,
7694
				    struct intel_encoder *encoder)
7695
{
7696
	connector->encoder = encoder;
7697
	drm_mode_connector_attach_encoder(&connector->base,
7698
					  &encoder->base);
7699
}
2327 Serge 7700
 
2330 Serge 7701