Subversion Repositories Kolibri OS

Rev

Rev 2339 | Rev 2342 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2327 Serge 1
/*
2
 * Copyright © 2006-2007 Intel Corporation
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21
 * DEALINGS IN THE SOFTWARE.
22
 *
23
 * Authors:
24
 *  Eric Anholt 
25
 */
26
 
27
//#include 
28
#include 
29
//#include 
30
#include 
31
#include 
2330 Serge 32
#include 
2327 Serge 33
//#include 
34
#include "drmP.h"
35
#include "intel_drv.h"
2330 Serge 36
#include "i915_drm.h"
2327 Serge 37
#include "i915_drv.h"
38
//#include "i915_trace.h"
39
#include "drm_dp_helper.h"
40
 
41
#include "drm_crtc_helper.h"
42
 
43
phys_addr_t get_bus_addr(void);
44
 
45
/* Return true when n is a non-zero power of two.
 * A power of two has exactly one bit set, so clearing the lowest set
 * bit with (n & (n - 1)) must leave zero. */
static inline __attribute__((const))
bool is_power_of_2(unsigned long n)
{
    if (n == 0)
        return false;
    return (n & (n - 1)) == 0;
}
50
 
2330 Serge 51
/* Minimal Linux-style error-pointer helpers for this port.
 * Kernel convention: pointer values in [-MAX_ERRNO, -1] (the last page
 * of the address space) are never valid pointers and are used to carry
 * a negative errno through a void* return. */
#define MAX_ERRNO       4095

/* True when x lies in the range reserved for encoded errnos. */
#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)

/* Non-zero when ptr is an encoded error value rather than a real pointer. */
static inline long IS_ERR(const void *ptr)
{
    return IS_ERR_VALUE((unsigned long)ptr);
}

/* Encode a (negative) errno as a pointer value. */
static inline void *ERR_PTR(long error)
{
    return (void *) error;
}
64
 
65
 
2327 Serge 66
/* Shim over the KolibriOS PciRead16 primitive: read a 16-bit value from
 * the device's PCI configuration space at offset 'where' into *val.
 * NOTE(review): always returns 1, whereas the Linux function it mimics
 * returns 0 on success — confirm no caller in this port checks the
 * return value for the Linux convention. */
static inline int pci_read_config_word(struct pci_dev *dev, int where,
                    u16 *val)
{
    *val = PciRead16(dev->busnr, dev->devfn, where);
    return 1;
}
72
 
73
 
74
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
75
 
76
bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
77
static void intel_update_watermarks(struct drm_device *dev);
78
static void intel_increase_pllclock(struct drm_crtc *crtc);
79
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
80
 
81
/* PLL divider set for one display clock.  The "given" fields are what
 * the find_pll routines search over; the "derived" fields are computed
 * from them by intel_clock()/pineview_clock(). */
typedef struct {
    /* given values */
    int n;
    int m1, m2;
    int p1, p2;
    /* derived values */
    int dot;    /* resulting dot clock (same kHz units as mode->clock) */
    int vco;    /* VCO frequency */
    int m;      /* combined m divider */
    int p;      /* combined p divider */
} intel_clock_t;

/* Inclusive [min, max] range for a single divider. */
typedef struct {
    int min, max;
} intel_range_t;

/* p2 divider selection: p2_slow is used below dot_limit,
 * p2_fast at or above it (see the find_pll routines). */
typedef struct {
    int dot_limit;
    int p2_slow, p2_fast;
} intel_p2_t;

#define INTEL_P2_NUM              2
typedef struct intel_limit intel_limit_t;

/* Per-platform / per-output PLL limits plus the search routine used to
 * find dividers for a target clock with the given refclk. */
struct intel_limit {
    intel_range_t   dot, vco, n, m, m1, m2, p, p1;
    intel_p2_t      p2;
    bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
              int, int, intel_clock_t *);
};
110
 
111
/* FDI */
112
#define IRONLAKE_FDI_FREQ       2700000 /* in kHz for mode->clock */
113
 
114
static bool
115
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
116
            int target, int refclk, intel_clock_t *best_clock);
117
static bool
118
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
119
            int target, int refclk, intel_clock_t *best_clock);
120
 
121
static bool
122
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
123
              int target, int refclk, intel_clock_t *best_clock);
124
static bool
125
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
126
               int target, int refclk, intel_clock_t *best_clock);
127
 
128
/* FDI link frequency in units of 100MHz: on Gen5 (Ironlake) it is read
 * from the BIOS-programmed FDI PLL register; later generations use a
 * fixed 2.7GHz link. */
static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
	if (IS_GEN5(dev)) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
	} else
		return 27;
}
137
 
138
/* PLL limit tables.  One table per (platform generation, output type)
 * pair; selected at mode-set time by intel_limit() below.  All .dot
 * values are in kHz (mode->clock units), .vco in kHz. */

/* Gen2 (i8xx) driving a DVO encoder. */
static const intel_limit_t intel_limits_i8xx_dvo = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 930000, .max = 1400000 },
        .n = { .min = 3, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
	.find_pll = intel_find_best_PLL,
};

/* Gen2 (i8xx) driving LVDS. */
static const intel_limit_t intel_limits_i8xx_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 930000, .max = 1400000 },
        .n = { .min = 3, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};

/* Gen3+ (i9xx) SDVO and other non-LVDS outputs. */
static const intel_limit_t intel_limits_i9xx_sdvo = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 10, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

/* Gen3+ (i9xx) LVDS. */
static const intel_limit_t intel_limits_i9xx_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 10, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 7, .max = 98 },
        .p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};


/* G4x SDVO. */
static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4x HDMI (also used for analog outputs, see intel_g4x_limit()). */
static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4x single-channel LVDS. */
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4x dual-channel LVDS. */
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4x DisplayPort — fixed-frequency link, uses the DP-specific finder. */
static const intel_limit_t intel_limits_g4x_display_port = {
        .dot = { .min = 161670, .max = 227000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 2 },
        .m = { .min = 97, .max = 108 },
        .m1 = { .min = 0x10, .max = 0x12 },
        .m2 = { .min = 0x05, .max = 0x06 },
        .p = { .min = 10, .max = 20 },
        .p1 = { .min = 1, .max = 2},
        .p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
        .find_pll = intel_find_pll_g4x_dp,
};

/* Pineview SDVO. */
static const intel_limit_t intel_limits_pineview_sdvo = {
        .dot = { .min = 20000, .max = 400000},
        .vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

/* Pineview LVDS. */
static const intel_limit_t intel_limits_pineview_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1700000, .max = 3500000 },
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 7, .max = 112 },
        .p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_find_best_PLL,
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake single-channel LVDS, 120MHz refclk. */
static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake dual-channel LVDS, 120MHz refclk. */
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2,.max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2,.max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake DisplayPort/eDP — fixed-frequency link, DP-specific finder. */
static const intel_limit_t intel_limits_ironlake_display_port = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000},
        .n = { .min = 1, .max = 2 },
        .m = { .min = 81, .max = 90 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 10, .max = 20 },
        .p1 = { .min = 1, .max = 2},
        .p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
        .find_pll = intel_find_pll_ironlake_dp,
};
388
 
389
/* Pick the PLL limit table for an Ironlake/Sandybridge (PCH split) CRTC,
 * based on the attached output type and, for LVDS, on the current
 * single/dual channel state read from PCH_LVDS and the refclk. */
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
						int refclk)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP) {
			/* LVDS dual channel */
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			/* LVDS single channel */
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
			HAS_eDP)
		limit = &intel_limits_ironlake_display_port;
	else
		limit = &intel_limits_ironlake_dac;

	return limit;
}
418
 
419
/* Pick the PLL limit table for a G4x CRTC based on the attached output
 * type; for LVDS the dual/single channel state is read from LVDS. */
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			/* LVDS with dual channel */
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			/* LVDS with single channel */
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else if (intel_pipe_has_type (crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		limit = &intel_limits_g4x_display_port;
	} else /* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;

	return limit;
}
445
 
446
/* Top-level PLL limit table dispatch: route to the per-generation
 * helper (PCH split / G4x) or select directly for Pineview, Gen3/4
 * (i9xx) and Gen2 (i8xx). */
static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (HAS_PCH_SPLIT(dev))
		limit = intel_ironlake_limit(crtc, refclk);
	else if (IS_G4X(dev)) {
		limit = intel_g4x_limit(crtc);
	} else if (IS_PINEVIEW(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_pineview_lvds;
		else
			limit = &intel_limits_pineview_sdvo;
	} else if (!IS_GEN2(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i9xx_lvds;
		else
			limit = &intel_limits_i9xx_sdvo;
	} else {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i8xx_lvds;
		else
			limit = &intel_limits_i8xx_dvo;
	}
	return limit;
}
473
 
474
/* m1 is reserved as 0 in Pineview, n is a ring counter */
/* Fill in the derived m/p/vco/dot fields from the given Pineview
 * dividers: m comes from m2 alone, and n divides directly (no +2). */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / clock->n;
	clock->dot = clock->vco / clock->p;
}

/* Fill in the derived fields for the classic (non-Pineview) PLL:
 * m = 5*(m1+2) + (m2+2), and the n divider is register value + 2
 * (see the Ironlake limit table comment above). */
static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
{
	if (IS_PINEVIEW(dev)) {
		pineview_clock(refclk, clock);
		return;
	}
	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / (clock->n + 2);
	clock->dot = clock->vco / clock->p;
}
494
 
495
/**
 * Returns whether any output on the specified pipe is of the specified type
 *
 * Walks the device's encoder list and reports true if any encoder
 * currently attached to @crtc has the given INTEL_OUTPUT_* @type.
 */
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->base.crtc == crtc && encoder->type == type)
			return true;

	return false;
}
510
 
511
/* Bail out of the enclosing validity check; the debug print is
 * compiled out in this port. */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid ("p1 out of range\n");
	if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
		INTELPllInvalid ("p out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid ("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid ("m1 out of range\n");
	/* m1 > m2 is required except on Pineview, where m1 is unused. */
	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
		INTELPllInvalid ("m1 <= m2\n");
	if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
		INTELPllInvalid ("m out of range\n");
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid ("n out of range\n");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid ("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid ("dot out of range\n");

	return true;
}
545
 
546
/* Exhaustive divider search for Gen2/3/4 and Pineview: try every
 * m1/m2/n/p1 combination within the limit table and keep the one whose
 * resulting dot clock is closest to @target.  Returns true if any
 * valid combination was found (best_clock is zeroed otherwise). */
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *best_clock)

{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int err = target;	/* worst acceptable error: the target itself */

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (I915_READ(LVDS)) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel.  We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset (best_clock, 0, sizeof (*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 is always 0 in Pineview */
			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* err only shrinks below target when a valid clock was stored */
	return (err != target);
}
608
 
609
/* Divider search for G4x/Ironlake-class PLLs.  Unlike the exhaustive
 * search above, this accepts any result within ~0.585% of @target and,
 * per hardware preference, favours the smallest n (and within that,
 * larger m1/m2) by shrinking max_n whenever a better match is found. */
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		int lvds_reg;

		/* LVDS channel state lives behind the PCH on ILK+ */
		if (HAS_PCH_SPLIT(dev))
			lvds_reg = PCH_LVDS;
		else
			lvds_reg = LVDS;
		if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* don't consider larger n than this hit */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
672
 
673
/* DisplayPort on Ironlake: the link runs at one of two fixed rates, so
 * no search is needed — pick the canned divider set for the 162MHz
 * (target < 200000 kHz) or 270MHz link and derive the rest. */
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;

	if (target < 200000) {
		clock.n = 1;
		clock.p1 = 2;
		clock.p2 = 10;
		clock.m1 = 12;
		clock.m2 = 9;
	} else {
		clock.n = 2;
		clock.p1 = 1;
		clock.p2 = 10;
		clock.m1 = 14;
		clock.m2 = 8;
	}
	intel_clock(dev, refclk, &clock);
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}

/* DisplayPort has only two frequencies, 162MHz and 270MHz */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *best_clock)
{
	intel_clock_t clock;
	if (target < 200000) {
		clock.p1 = 2;
		clock.p2 = 10;
		clock.n = 2;
		clock.m1 = 23;
		clock.m2 = 8;
	} else {
		clock.p1 = 1;
		clock.p2 = 10;
		clock.n = 1;
		clock.m1 = 14;
		clock.m2 = 2;
	}
	/* derive m/p/dot directly with a fixed 96MHz refclk; vco unused */
	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
	clock.p = (clock.p1 * clock.p2);
	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
	clock.vco = 0;
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}
724
 
725
/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
 * mode setting code.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipestat_reg = PIPESTAT(pipe);

	/* Clear existing vblank status. Note this will clear any other
	 * sticky status fields as well.
	 *
	 * This races with i915_driver_irq_handler() with the result
	 * that either function could miss a vblank event.  Here it is not
	 * fatal, as we will either wait upon the next vblank interrupt or
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
	 * called during modeset at which time the GPU should be idle and
	 * should *not* be performing page flips and thus not waiting on
	 * vblanks...
	 * Currently, the result of us stealing a vblank from the irq
	 * handler is that a single frame will be skipped during swapbuffers.
	 */
	I915_WRITE(pipestat_reg,
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

	/* Wait for vblank interrupt bit to set */
	if (wait_for(I915_READ(pipestat_reg) &
		     PIPE_VBLANK_INTERRUPT_STATUS,
		     50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}
760
 
761
/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(pipe);

		/* Wait for the Pipe State to go off */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	} else {
		u32 last_line;
		int reg = PIPEDSL(pipe);
		/* overall deadline of 100ms for the polling loop below */
		unsigned long timeout = jiffies + msecs_to_jiffies(100);

		/* Wait for the display line to settle */
		do {
			last_line = I915_READ(reg) & DSL_LINEMASK;
			mdelay(5);
		} while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
			 time_after(timeout, jiffies));
		if (time_after(jiffies, timeout))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	}
}
804
 
805
/* Map an enable flag to a human-readable string for the WARN messages
 * emitted by the assert_* helpers below. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";
	return "off";
}
809
 
810
/* Only for pre-ILK configs */
/* WARN if the pipe's DPLL enable bit does not match the expected state. */
static void assert_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)

/* For ILK+ */
/* WARN if the PCH DPLL enable bit does not match the expected state. */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
			   enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PCH PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
845
 
846
/* WARN if the FDI transmitter enable bit does not match the expected state. */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_TX_ENABLE);
	WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

/* WARN if the FDI receiver enable bit does not match the expected state. */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_RX_ENABLE);
	WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

/* WARN if the FDI TX PLL is not enabled (skipped on Gen5, where it
 * is always on). */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (dev_priv->info->gen == 5)
		return;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

/* WARN if the FDI RX PLL is not enabled. */
static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
}
905
 
906
/* WARN if the panel power sequencer registers for the panel driving
 * @pipe are still locked (powered on without the unlock key set).
 * Register locations differ between PCH-split and older platforms. */
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	int pp_reg, lvds_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		pp_reg = PCH_PP_CONTROL;
		lvds_reg = PCH_LVDS;
	} else {
		pp_reg = PP_CONTROL;
		lvds_reg = LVDS;
	}

	/* unlocked == panel off, or the unlock key is written */
	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
		locked = false;

	/* which pipe actually drives the LVDS panel */
	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
		panel_pipe = PIPE_B;

	WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
934
 
935
/* WARN if the pipe's enable bit does not match the expected state. */
static void assert_pipe(struct drm_i915_private *dev_priv,
			enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & PIPECONF_ENABLE);
	WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)

/* WARN if the given display plane is not enabled. */
static void assert_plane_enabled(struct drm_i915_private *dev_priv,
				 enum plane plane)
{
	int reg;
	u32 val;

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	WARN(!(val & DISPLAY_PLANE_ENABLE),
	     "plane %c assertion failure, should be active but is disabled\n",
	     plane_name(plane));
}

/* WARN if any display plane routed to @pipe is still enabled.
 * Only meaningful pre-ILK; on PCH-split hardware planes are fixed to
 * pipes, so the check is skipped. */
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe)
{
	int reg, i;
	u32 val;
	int cur_pipe;

	/* Planes are fixed to pipes on ILK+ */
	if (HAS_PCH_SPLIT(dev_priv->dev))
		return;

	/* Need to check both planes against the pipe */
	for (i = 0; i < 2; i++) {
		reg = DSPCNTR(i);
		val = I915_READ(reg);
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(i), pipe_name(pipe));
	}
}
987
 
988
static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
989
{
990
	u32 val;
991
	bool enabled;
992
 
993
	val = I915_READ(PCH_DREF_CONTROL);
994
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
995
			    DREF_SUPERSPREAD_SOURCE_MASK));
996
	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
997
}
998
 
999
static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
1000
				       enum pipe pipe)
1001
{
1002
	int reg;
1003
	u32 val;
1004
	bool enabled;
1005
 
1006
	reg = TRANSCONF(pipe);
1007
	val = I915_READ(reg);
1008
	enabled = !!(val & TRANS_ENABLE);
1009
	WARN(enabled,
1010
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
1011
	     pipe_name(pipe));
1012
}
1013
 
1014
/* Return true when the DP port whose control value is @val is enabled
 * and driven by @pipe.  On CPT the pipe routing lives in TRANS_DP_CTL
 * (matched against @port_sel); elsewhere it is encoded in @val itself. */
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum pipe pipe, u32 port_sel, u32 val)
{
	if (!(val & DP_PORT_EN))
		return false;

	if (!HAS_PCH_CPT(dev_priv->dev))
		return (val & DP_PIPE_MASK) == (pipe << 30);

	return (I915_READ(TRANS_DP_CTL(pipe)) & TRANS_DP_PORT_SEL_MASK) == port_sel;
}
1031
 
1032
/* Return true when the HDMI port whose control value is @val is enabled
 * and driven by @pipe (transcoder select on CPT, pipe select otherwise). */
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if (!(val & PORT_ENABLE))
		return false;

	if (HAS_PCH_CPT(dev_priv->dev))
		return (val & PORT_TRANS_SEL_MASK) == PORT_TRANS_SEL_CPT(pipe);

	return (val & TRANSCODER_MASK) == TRANSCODER(pipe);
}
1047
 
1048
/* Return true when the LVDS port whose control value is @val is enabled
 * and driven by @pipe. */
static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if (!(val & LVDS_PORT_EN))
		return false;

	if (HAS_PCH_CPT(dev_priv->dev))
		return (val & PORT_TRANS_SEL_MASK) == PORT_TRANS_SEL_CPT(pipe);

	return (val & LVDS_PIPE_MASK) == LVDS_PIPE(pipe);
}
1063
 
1064
/* Return true when the analog (ADPA/VGA) DAC whose control value is @val
 * is enabled and driven by @pipe. */
static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if (!(val & ADPA_DAC_ENABLE))
		return false;

	if (HAS_PCH_CPT(dev_priv->dev))
		return (val & PORT_TRANS_SEL_MASK) == PORT_TRANS_SEL_CPT(pipe);

	return (val & ADPA_PIPE_SELECT_MASK) == ADPA_PIPE_SELECT(pipe);
}
1078
 
1079
/* Warn if the PCH DP port at @reg is still enabled on transcoder @pipe. */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, int reg, u32 port_sel)
{
	u32 dp = I915_READ(reg);

	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, dp),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));
}
1087
 
1088
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1089
				     enum pipe pipe, int reg)
1090
{
1091
	u32 val = I915_READ(reg);
1092
	WARN(hdmi_pipe_enabled(dev_priv, val, pipe),
1093
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1094
	     reg, pipe_name(pipe));
1095
}
1096
 
1097
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1098
				      enum pipe pipe)
1099
{
1100
	int reg;
1101
	u32 val;
1102
 
1103
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1104
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1105
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1106
 
1107
	reg = PCH_ADPA;
1108
	val = I915_READ(reg);
1109
	WARN(adpa_pipe_enabled(dev_priv, val, pipe),
1110
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
1111
	     pipe_name(pipe));
1112
 
1113
	reg = PCH_LVDS;
1114
	val = I915_READ(reg);
1115
	WARN(lvds_pipe_enabled(dev_priv, val, pipe),
1116
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
1117
	     pipe_name(pipe));
1118
 
1119
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
1120
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
1121
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
1122
}
1123
 
1124
/**
1125
 * intel_enable_pll - enable a PLL
1126
 * @dev_priv: i915 private structure
1127
 * @pipe: pipe PLL to enable
1128
 *
1129
 * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
1130
 * make sure the PLL reg is writable first though, since the panel write
1131
 * protect mechanism may be enabled.
1132
 *
1133
 * Note!  This is for pre-ILK only.
1134
 */
1135
static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1136
{
1137
    int reg;
1138
    u32 val;
1139
 
1140
    /* No really, not for ILK+ */
1141
    BUG_ON(dev_priv->info->gen >= 5);
1142
 
1143
    /* PLL is protected by panel, make sure we can write it */
1144
    if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
1145
        assert_panel_unlocked(dev_priv, pipe);
1146
 
1147
    reg = DPLL(pipe);
1148
    val = I915_READ(reg);
1149
    val |= DPLL_VCO_ENABLE;
1150
 
1151
    /* We do this three times for luck */
1152
    I915_WRITE(reg, val);
1153
    POSTING_READ(reg);
1154
    udelay(150); /* wait for warmup */
1155
    I915_WRITE(reg, val);
1156
    POSTING_READ(reg);
1157
    udelay(150); /* wait for warmup */
1158
    I915_WRITE(reg, val);
1159
    POSTING_READ(reg);
1160
    udelay(150); /* wait for warmup */
1161
}
1162
 
1163
/**
1164
 * intel_disable_pll - disable a PLL
1165
 * @dev_priv: i915 private structure
1166
 * @pipe: pipe PLL to disable
1167
 *
1168
 * Disable the PLL for @pipe, making sure the pipe is off first.
1169
 *
1170
 * Note!  This is for pre-ILK only.
1171
 */
1172
static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1173
{
1174
	int reg;
1175
	u32 val;
1176
 
1177
	/* Don't disable pipe A or pipe A PLLs if needed */
1178
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1179
		return;
1180
 
1181
	/* Make sure the pipe isn't still relying on us */
1182
	assert_pipe_disabled(dev_priv, pipe);
1183
 
1184
	reg = DPLL(pipe);
1185
	val = I915_READ(reg);
1186
	val &= ~DPLL_VCO_ENABLE;
1187
	I915_WRITE(reg, val);
1188
	POSTING_READ(reg);
1189
}
1190
 
1191
/**
1192
 * intel_enable_pch_pll - enable PCH PLL
1193
 * @dev_priv: i915 private structure
1194
 * @pipe: pipe PLL to enable
1195
 *
1196
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
1197
 * drives the transcoder clock.
1198
 */
1199
static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
1200
				 enum pipe pipe)
1201
{
1202
	int reg;
1203
	u32 val;
1204
 
1205
	/* PCH only available on ILK+ */
1206
	BUG_ON(dev_priv->info->gen < 5);
1207
 
1208
	/* PCH refclock must be enabled first */
1209
	assert_pch_refclk_enabled(dev_priv);
1210
 
1211
	reg = PCH_DPLL(pipe);
1212
	val = I915_READ(reg);
1213
	val |= DPLL_VCO_ENABLE;
1214
	I915_WRITE(reg, val);
1215
	POSTING_READ(reg);
1216
	udelay(200);
1217
}
1218
 
1219
static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
1220
				  enum pipe pipe)
1221
{
1222
	int reg;
1223
	u32 val;
1224
 
1225
	/* PCH only available on ILK+ */
1226
	BUG_ON(dev_priv->info->gen < 5);
1227
 
1228
	/* Make sure transcoder isn't still depending on us */
1229
	assert_transcoder_disabled(dev_priv, pipe);
1230
 
1231
	reg = PCH_DPLL(pipe);
1232
	val = I915_READ(reg);
1233
	val &= ~DPLL_VCO_ENABLE;
1234
	I915_WRITE(reg, val);
1235
	POSTING_READ(reg);
1236
	udelay(200);
1237
}
1238
 
1239
static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1240
				    enum pipe pipe)
1241
{
1242
	int reg;
1243
	u32 val;
1244
 
1245
	/* PCH only available on ILK+ */
1246
	BUG_ON(dev_priv->info->gen < 5);
1247
 
1248
	/* Make sure PCH DPLL is enabled */
1249
	assert_pch_pll_enabled(dev_priv, pipe);
1250
 
1251
	/* FDI must be feeding us bits for PCH ports */
1252
	assert_fdi_tx_enabled(dev_priv, pipe);
1253
	assert_fdi_rx_enabled(dev_priv, pipe);
1254
 
1255
	reg = TRANSCONF(pipe);
1256
	val = I915_READ(reg);
1257
 
1258
	if (HAS_PCH_IBX(dev_priv->dev)) {
1259
		/*
1260
		 * make the BPC in transcoder be consistent with
1261
		 * that in pipeconf reg.
1262
		 */
1263
		val &= ~PIPE_BPC_MASK;
1264
		val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
1265
	}
1266
	I915_WRITE(reg, val | TRANS_ENABLE);
1267
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1268
		DRM_ERROR("failed to enable transcoder %d\n", pipe);
1269
}
1270
 
1271
static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
1272
				     enum pipe pipe)
1273
{
1274
	int reg;
1275
	u32 val;
1276
 
1277
	/* FDI relies on the transcoder */
1278
	assert_fdi_tx_disabled(dev_priv, pipe);
1279
	assert_fdi_rx_disabled(dev_priv, pipe);
1280
 
1281
	/* Ports must be off as well */
1282
	assert_pch_ports_disabled(dev_priv, pipe);
1283
 
1284
	reg = TRANSCONF(pipe);
1285
	val = I915_READ(reg);
1286
	val &= ~TRANS_ENABLE;
1287
	I915_WRITE(reg, val);
1288
	/* wait for PCH transcoder off, transcoder state */
1289
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1290
		DRM_ERROR("failed to disable transcoder\n");
1291
}
1292
 
1293
/**
1294
 * intel_enable_pipe - enable a pipe, asserting requirements
1295
 * @dev_priv: i915 private structure
1296
 * @pipe: pipe to enable
1297
 * @pch_port: on ILK+, is this pipe driving a PCH port or not
1298
 *
1299
 * Enable @pipe, making sure that various hardware specific requirements
1300
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1301
 *
1302
 * @pipe should be %PIPE_A or %PIPE_B.
1303
 *
1304
 * Will wait until the pipe is actually running (i.e. first vblank) before
1305
 * returning.
1306
 */
1307
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1308
			      bool pch_port)
1309
{
1310
	int reg;
1311
	u32 val;
1312
 
1313
	/*
1314
	 * A pipe without a PLL won't actually be able to drive bits from
1315
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1316
	 * need the check.
1317
	 */
1318
	if (!HAS_PCH_SPLIT(dev_priv->dev))
1319
		assert_pll_enabled(dev_priv, pipe);
1320
	else {
1321
		if (pch_port) {
1322
			/* if driving the PCH, we need FDI enabled */
1323
			assert_fdi_rx_pll_enabled(dev_priv, pipe);
1324
			assert_fdi_tx_pll_enabled(dev_priv, pipe);
1325
		}
1326
		/* FIXME: assert CPU port conditions for SNB+ */
1327
	}
1328
 
1329
	reg = PIPECONF(pipe);
1330
	val = I915_READ(reg);
1331
	if (val & PIPECONF_ENABLE)
1332
		return;
1333
 
1334
	I915_WRITE(reg, val | PIPECONF_ENABLE);
1335
	intel_wait_for_vblank(dev_priv->dev, pipe);
1336
}
1337
 
1338
/**
1339
 * intel_disable_pipe - disable a pipe, asserting requirements
1340
 * @dev_priv: i915 private structure
1341
 * @pipe: pipe to disable
1342
 *
1343
 * Disable @pipe, making sure that various hardware specific requirements
1344
 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1345
 *
1346
 * @pipe should be %PIPE_A or %PIPE_B.
1347
 *
1348
 * Will wait until the pipe has shut down before returning.
1349
 */
1350
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1351
			       enum pipe pipe)
1352
{
1353
	int reg;
1354
	u32 val;
1355
 
1356
	/*
1357
	 * Make sure planes won't keep trying to pump pixels to us,
1358
	 * or we might hang the display.
1359
	 */
1360
	assert_planes_disabled(dev_priv, pipe);
1361
 
1362
	/* Don't disable pipe A or pipe A PLLs if needed */
1363
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1364
		return;
1365
 
1366
	reg = PIPECONF(pipe);
1367
	val = I915_READ(reg);
1368
	if ((val & PIPECONF_ENABLE) == 0)
1369
		return;
1370
 
1371
	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
1372
	intel_wait_for_pipe_off(dev_priv->dev, pipe);
1373
}
1374
 
1375
/*
1376
 * Plane regs are double buffered, going from enabled->disabled needs a
1377
 * trigger in order to latch.  The display address reg provides this.
1378
 */
1379
static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
1380
				      enum plane plane)
1381
{
1382
	I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1383
	I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1384
}
1385
 
1386
/**
1387
 * intel_enable_plane - enable a display plane on a given pipe
1388
 * @dev_priv: i915 private structure
1389
 * @plane: plane to enable
1390
 * @pipe: pipe being fed
1391
 *
1392
 * Enable @plane on @pipe, making sure that @pipe is running first.
1393
 */
1394
static void intel_enable_plane(struct drm_i915_private *dev_priv,
1395
			       enum plane plane, enum pipe pipe)
1396
{
1397
	int reg;
1398
	u32 val;
1399
 
1400
	/* If the pipe isn't enabled, we can't pump pixels and may hang */
1401
	assert_pipe_enabled(dev_priv, pipe);
1402
 
1403
	reg = DSPCNTR(plane);
1404
	val = I915_READ(reg);
1405
	if (val & DISPLAY_PLANE_ENABLE)
1406
		return;
1407
 
1408
	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
1409
	intel_flush_display_plane(dev_priv, plane);
1410
	intel_wait_for_vblank(dev_priv->dev, pipe);
1411
}
1412
 
1413
/**
1414
 * intel_disable_plane - disable a display plane
1415
 * @dev_priv: i915 private structure
1416
 * @plane: plane to disable
1417
 * @pipe: pipe consuming the data
1418
 *
1419
 * Disable @plane; should be an independent operation.
1420
 */
1421
static void intel_disable_plane(struct drm_i915_private *dev_priv,
1422
				enum plane plane, enum pipe pipe)
1423
{
1424
	int reg;
1425
	u32 val;
1426
 
1427
	reg = DSPCNTR(plane);
1428
	val = I915_READ(reg);
1429
	if ((val & DISPLAY_PLANE_ENABLE) == 0)
1430
		return;
1431
 
1432
	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
1433
	intel_flush_display_plane(dev_priv, plane);
1434
	intel_wait_for_vblank(dev_priv->dev, pipe);
1435
}
1436
 
1437
/* Turn off the PCH DP port at @reg if it is currently driven by @pipe. */
static void disable_pch_dp(struct drm_i915_private *dev_priv,
			   enum pipe pipe, int reg, u32 port_sel)
{
	u32 dp = I915_READ(reg);

	if (!dp_pipe_enabled(dev_priv, pipe, port_sel, dp))
		return;

	DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
	I915_WRITE(reg, dp & ~DP_PORT_EN);
}
1446
 
1447
static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1448
			     enum pipe pipe, int reg)
1449
{
1450
	u32 val = I915_READ(reg);
1451
	if (hdmi_pipe_enabled(dev_priv, val, pipe)) {
1452
		DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
1453
			      reg, pipe);
1454
		I915_WRITE(reg, val & ~PORT_ENABLE);
1455
	}
1456
}
1457
 
1458
/* Disable any ports connected to this transcoder */
1459
static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1460
				    enum pipe pipe)
1461
{
1462
	u32 reg, val;
1463
 
1464
	val = I915_READ(PCH_PP_CONTROL);
1465
	I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
1466
 
1467
	disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1468
	disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1469
	disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1470
 
1471
	reg = PCH_ADPA;
1472
	val = I915_READ(reg);
1473
	if (adpa_pipe_enabled(dev_priv, val, pipe))
1474
		I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
1475
 
1476
	reg = PCH_LVDS;
1477
	val = I915_READ(reg);
1478
	if (lvds_pipe_enabled(dev_priv, val, pipe)) {
1479
		DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
1480
		I915_WRITE(reg, val & ~LVDS_PORT_EN);
1481
		POSTING_READ(reg);
1482
		udelay(100);
1483
	}
1484
 
1485
	disable_pch_hdmi(dev_priv, pipe, HDMIB);
1486
	disable_pch_hdmi(dev_priv, pipe, HDMIC);
1487
	disable_pch_hdmi(dev_priv, pipe, HDMID);
1488
}
1489
 
1490
static void i8xx_disable_fbc(struct drm_device *dev)
1491
{
1492
    struct drm_i915_private *dev_priv = dev->dev_private;
1493
    u32 fbc_ctl;
1494
 
1495
    /* Disable compression */
1496
    fbc_ctl = I915_READ(FBC_CONTROL);
1497
    if ((fbc_ctl & FBC_CTL_EN) == 0)
1498
        return;
1499
 
1500
    fbc_ctl &= ~FBC_CTL_EN;
1501
    I915_WRITE(FBC_CONTROL, fbc_ctl);
1502
 
1503
    /* Wait for compressing bit to clear */
1504
    if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
1505
        DRM_DEBUG_KMS("FBC idle timed out\n");
1506
        return;
1507
    }
1508
 
1509
    DRM_DEBUG_KMS("disabled FBC\n");
1510
}
1511
 
1512
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1513
{
1514
    struct drm_device *dev = crtc->dev;
1515
    struct drm_i915_private *dev_priv = dev->dev_private;
1516
    struct drm_framebuffer *fb = crtc->fb;
1517
    struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1518
    struct drm_i915_gem_object *obj = intel_fb->obj;
1519
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1520
    int cfb_pitch;
1521
    int plane, i;
1522
    u32 fbc_ctl, fbc_ctl2;
1523
 
1524
    cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
1525
    if (fb->pitch < cfb_pitch)
1526
        cfb_pitch = fb->pitch;
1527
 
1528
    /* FBC_CTL wants 64B units */
1529
    cfb_pitch = (cfb_pitch / 64) - 1;
1530
    plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
1531
 
1532
    /* Clear old tags */
1533
    for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
1534
        I915_WRITE(FBC_TAG + (i * 4), 0);
1535
 
1536
    /* Set it up... */
1537
    fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
1538
    fbc_ctl2 |= plane;
1539
    I915_WRITE(FBC_CONTROL2, fbc_ctl2);
1540
    I915_WRITE(FBC_FENCE_OFF, crtc->y);
1541
 
1542
    /* enable it... */
1543
    fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
1544
    if (IS_I945GM(dev))
1545
        fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
1546
    fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
1547
    fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
1548
    fbc_ctl |= obj->fence_reg;
1549
    I915_WRITE(FBC_CONTROL, fbc_ctl);
1550
 
1551
    DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
1552
              cfb_pitch, crtc->y, intel_crtc->plane);
1553
}
1554
 
1555
static bool i8xx_fbc_enabled(struct drm_device *dev)
1556
{
1557
    struct drm_i915_private *dev_priv = dev->dev_private;
1558
 
1559
    return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
1560
}
1561
 
1562
static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1563
{
1564
    struct drm_device *dev = crtc->dev;
1565
    struct drm_i915_private *dev_priv = dev->dev_private;
1566
    struct drm_framebuffer *fb = crtc->fb;
1567
    struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1568
    struct drm_i915_gem_object *obj = intel_fb->obj;
1569
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1570
    int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1571
    unsigned long stall_watermark = 200;
1572
    u32 dpfc_ctl;
1573
 
1574
    dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
1575
    dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
1576
    I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
1577
 
1578
    I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1579
           (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1580
           (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1581
    I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
1582
 
1583
    /* enable it... */
1584
    I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
1585
 
1586
    DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1587
}
1588
 
1589
static void g4x_disable_fbc(struct drm_device *dev)
1590
{
1591
    struct drm_i915_private *dev_priv = dev->dev_private;
1592
    u32 dpfc_ctl;
1593
 
1594
    /* Disable compression */
1595
    dpfc_ctl = I915_READ(DPFC_CONTROL);
1596
    if (dpfc_ctl & DPFC_CTL_EN) {
1597
        dpfc_ctl &= ~DPFC_CTL_EN;
1598
        I915_WRITE(DPFC_CONTROL, dpfc_ctl);
1599
 
1600
        DRM_DEBUG_KMS("disabled FBC\n");
1601
    }
1602
}
1603
 
1604
static bool g4x_fbc_enabled(struct drm_device *dev)
1605
{
1606
    struct drm_i915_private *dev_priv = dev->dev_private;
1607
 
1608
    return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
1609
}
1610
 
1611
static void sandybridge_blit_fbc_update(struct drm_device *dev)
1612
{
1613
	struct drm_i915_private *dev_priv = dev->dev_private;
1614
	u32 blt_ecoskpd;
1615
 
1616
	/* Make sure blitter notifies FBC of writes */
1617
	gen6_gt_force_wake_get(dev_priv);
1618
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
1619
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
1620
		GEN6_BLITTER_LOCK_SHIFT;
1621
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1622
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
1623
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1624
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
1625
			 GEN6_BLITTER_LOCK_SHIFT);
1626
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1627
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
1628
	gen6_gt_force_wake_put(dev_priv);
1629
}
1630
 
1631
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1632
{
1633
    struct drm_device *dev = crtc->dev;
1634
    struct drm_i915_private *dev_priv = dev->dev_private;
1635
    struct drm_framebuffer *fb = crtc->fb;
1636
    struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1637
    struct drm_i915_gem_object *obj = intel_fb->obj;
1638
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1639
    int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1640
    unsigned long stall_watermark = 200;
1641
    u32 dpfc_ctl;
1642
 
1643
    dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1644
    dpfc_ctl &= DPFC_RESERVED;
1645
    dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
1646
    /* Set persistent mode for front-buffer rendering, ala X. */
1647
    dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
1648
    dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
1649
    I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
1650
 
1651
    I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1652
           (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1653
           (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1654
    I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
1655
    I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
1656
    /* enable it... */
1657
    I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
1658
 
1659
    if (IS_GEN6(dev)) {
1660
        I915_WRITE(SNB_DPFC_CTL_SA,
1661
               SNB_CPU_FENCE_ENABLE | obj->fence_reg);
1662
        I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
1663
        sandybridge_blit_fbc_update(dev);
1664
    }
1665
 
1666
    DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1667
}
1668
 
1669
static void ironlake_disable_fbc(struct drm_device *dev)
1670
{
1671
    struct drm_i915_private *dev_priv = dev->dev_private;
1672
    u32 dpfc_ctl;
1673
 
1674
    /* Disable compression */
1675
    dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1676
    if (dpfc_ctl & DPFC_CTL_EN) {
1677
        dpfc_ctl &= ~DPFC_CTL_EN;
1678
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
1679
 
1680
        DRM_DEBUG_KMS("disabled FBC\n");
1681
    }
1682
}
1683
 
1684
static bool ironlake_fbc_enabled(struct drm_device *dev)
1685
{
1686
    struct drm_i915_private *dev_priv = dev->dev_private;
1687
 
1688
    return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
1689
}
1690
 
1691
bool intel_fbc_enabled(struct drm_device *dev)
1692
{
1693
	struct drm_i915_private *dev_priv = dev->dev_private;
1694
 
1695
	if (!dev_priv->display.fbc_enabled)
1696
		return false;
1697
 
1698
	return dev_priv->display.fbc_enabled(dev);
1699
}
1700
 
1701
 
1702
 
1703
 
1704
 
1705
 
1706
 
1707
 
1708
 
1709
 
1710
static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1711
{
1712
	struct intel_fbc_work *work;
1713
	struct drm_device *dev = crtc->dev;
1714
	struct drm_i915_private *dev_priv = dev->dev_private;
1715
 
1716
	if (!dev_priv->display.enable_fbc)
1717
		return;
1718
 
1719
//	intel_cancel_fbc_work(dev_priv);
1720
 
1721
//	work = kzalloc(sizeof *work, GFP_KERNEL);
1722
//	if (work == NULL) {
1723
//		dev_priv->display.enable_fbc(crtc, interval);
1724
//		return;
1725
//	}
1726
 
1727
//	work->crtc = crtc;
1728
//	work->fb = crtc->fb;
1729
//	work->interval = interval;
1730
//	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
1731
 
1732
//	dev_priv->fbc_work = work;
1733
 
1734
	DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
1735
 
1736
	/* Delay the actual enabling to let pageflipping cease and the
1737
	 * display to settle before starting the compression. Note that
1738
	 * this delay also serves a second purpose: it allows for a
1739
	 * vblank to pass after disabling the FBC before we attempt
1740
	 * to modify the control registers.
1741
	 *
1742
	 * A more complicated solution would involve tracking vblanks
1743
	 * following the termination of the page-flipping sequence
1744
	 * and indeed performing the enable as a co-routine and not
1745
	 * waiting synchronously upon the vblank.
1746
	 */
1747
//	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
1748
}
1749
 
1750
void intel_disable_fbc(struct drm_device *dev)
1751
{
1752
	struct drm_i915_private *dev_priv = dev->dev_private;
1753
 
1754
//   intel_cancel_fbc_work(dev_priv);
1755
 
1756
	if (!dev_priv->display.disable_fbc)
1757
		return;
1758
 
1759
	dev_priv->display.disable_fbc(dev);
1760
	dev_priv->cfb_plane = -1;
1761
}
1762
 
1763
/**
1764
 * intel_update_fbc - enable/disable FBC as needed
1765
 * @dev: the drm_device
1766
 *
1767
 * Set up the framebuffer compression hardware at mode set time.  We
1768
 * enable it if possible:
1769
 *   - plane A only (on pre-965)
1770
 *   - no pixel mulitply/line duplication
1771
 *   - no alpha buffer discard
1772
 *   - no dual wide
1773
 *   - framebuffer <= 2048 in width, 1536 in height
1774
 *
1775
 * We can't assume that any compression will take place (worst case),
1776
 * so the compressed buffer has to be the same size as the uncompressed
1777
 * one.  It also must reside (along with the line length buffer) in
1778
 * stolen memory.
1779
 *
1780
 * We need to enable/disable FBC on a global basis.
1781
 */
1782
static void intel_update_fbc(struct drm_device *dev)
1783
{
1784
	struct drm_i915_private *dev_priv = dev->dev_private;
1785
	struct drm_crtc *crtc = NULL, *tmp_crtc;
1786
	struct intel_crtc *intel_crtc;
1787
	struct drm_framebuffer *fb;
1788
	struct intel_framebuffer *intel_fb;
1789
	struct drm_i915_gem_object *obj;
1790
 
1791
	DRM_DEBUG_KMS("\n");
1792
 
1793
	if (!i915_powersave)
1794
		return;
1795
 
1796
	if (!I915_HAS_FBC(dev))
1797
		return;
1798
 
1799
	/*
1800
	 * If FBC is already on, we just have to verify that we can
1801
	 * keep it that way...
1802
	 * Need to disable if:
1803
	 *   - more than one pipe is active
1804
	 *   - changing FBC params (stride, fence, mode)
1805
	 *   - new fb is too large to fit in compressed buffer
1806
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
1807
	 */
1808
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
1809
		if (tmp_crtc->enabled && tmp_crtc->fb) {
1810
			if (crtc) {
1811
				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
2336 Serge 1812
                dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
2327 Serge 1813
				goto out_disable;
1814
			}
1815
			crtc = tmp_crtc;
1816
		}
1817
	}
1818
 
1819
	if (!crtc || crtc->fb == NULL) {
1820
		DRM_DEBUG_KMS("no output, disabling\n");
2336 Serge 1821
        dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
2327 Serge 1822
		goto out_disable;
1823
	}
1824
 
1825
	intel_crtc = to_intel_crtc(crtc);
1826
	fb = crtc->fb;
1827
	intel_fb = to_intel_framebuffer(fb);
1828
	obj = intel_fb->obj;
1829
 
1830
	if (!i915_enable_fbc) {
1831
		DRM_DEBUG_KMS("fbc disabled per module param (default off)\n");
2336 Serge 1832
        dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
2327 Serge 1833
		goto out_disable;
1834
	}
1835
	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
1836
		DRM_DEBUG_KMS("framebuffer too large, disabling "
1837
			      "compression\n");
2336 Serge 1838
        dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
2327 Serge 1839
		goto out_disable;
1840
	}
1841
	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
1842
	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
1843
		DRM_DEBUG_KMS("mode incompatible with compression, "
1844
			      "disabling\n");
2336 Serge 1845
        dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
2327 Serge 1846
		goto out_disable;
1847
	}
1848
	if ((crtc->mode.hdisplay > 2048) ||
1849
	    (crtc->mode.vdisplay > 1536)) {
1850
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
2336 Serge 1851
        dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
2327 Serge 1852
		goto out_disable;
1853
	}
1854
	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
1855
		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
2336 Serge 1856
        dev_priv->no_fbc_reason = FBC_BAD_PLANE;
2327 Serge 1857
		goto out_disable;
1858
	}
1859
 
1860
	/* The use of a CPU fence is mandatory in order to detect writes
1861
	 * by the CPU to the scanout and trigger updates to the FBC.
1862
	 */
1863
//	if (obj->tiling_mode != I915_TILING_X ||
1864
//	    obj->fence_reg == I915_FENCE_REG_NONE) {
1865
//		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
1866
//		dev_priv->no_fbc_reason = FBC_NOT_TILED;
1867
//		goto out_disable;
1868
//	}
1869
 
1870
	/* If the kernel debugger is active, always disable compression */
1871
	if (in_dbg_master())
1872
		goto out_disable;
1873
 
1874
	/* If the scanout has not changed, don't modify the FBC settings.
1875
	 * Note that we make the fundamental assumption that the fb->obj
1876
	 * cannot be unpinned (and have its GTT offset and fence revoked)
1877
	 * without first being decoupled from the scanout and FBC disabled.
1878
	 */
1879
	if (dev_priv->cfb_plane == intel_crtc->plane &&
1880
	    dev_priv->cfb_fb == fb->base.id &&
1881
	    dev_priv->cfb_y == crtc->y)
1882
		return;
1883
 
1884
	if (intel_fbc_enabled(dev)) {
1885
		/* We update FBC along two paths, after changing fb/crtc
1886
		 * configuration (modeswitching) and after page-flipping
1887
		 * finishes. For the latter, we know that not only did
1888
		 * we disable the FBC at the start of the page-flip
1889
		 * sequence, but also more than one vblank has passed.
1890
		 *
1891
		 * For the former case of modeswitching, it is possible
1892
		 * to switch between two FBC valid configurations
1893
		 * instantaneously so we do need to disable the FBC
1894
		 * before we can modify its control registers. We also
1895
		 * have to wait for the next vblank for that to take
1896
		 * effect. However, since we delay enabling FBC we can
1897
		 * assume that a vblank has passed since disabling and
1898
		 * that we can safely alter the registers in the deferred
1899
		 * callback.
1900
		 *
1901
		 * In the scenario that we go from a valid to invalid
1902
		 * and then back to valid FBC configuration we have
1903
		 * no strict enforcement that a vblank occurred since
1904
		 * disabling the FBC. However, along all current pipe
1905
		 * disabling paths we do need to wait for a vblank at
1906
		 * some point. And we wait before enabling FBC anyway.
1907
		 */
1908
		DRM_DEBUG_KMS("disabling active FBC for update\n");
1909
		intel_disable_fbc(dev);
1910
	}
1911
 
1912
	intel_enable_fbc(crtc, 500);
1913
	return;
1914
 
1915
out_disable:
1916
	/* Multiple disables should be harmless */
1917
	if (intel_fbc_enabled(dev)) {
1918
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
1919
		intel_disable_fbc(dev);
1920
	}
1921
}
1922
 
2335 Serge 1923
/*
 * Pin a GEM object for use as a display scanout surface.
 *
 * Chooses the surface alignment required by the hardware generation and
 * tiling mode, then pins the object into the GTT via
 * i915_gem_object_pin_to_display_plane().  Fencing of tiled scanouts is
 * stubbed out in this port (see the commented-out block below).
 *
 * @dev:       DRM device
 * @obj:       GEM object backing the framebuffer
 * @pipelined: ring to pipeline the pin against (may be NULL)
 *
 * Returns 0 on success or a negative errno (-EINVAL for Y-tiling).
 * Side effect: temporarily clears dev_priv->mm.interruptible around the
 * pin so the operation cannot be interrupted, restoring it on all paths.
 */
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct intel_ring_buffer *pipelined)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 alignment;
	int ret;

	/* Pick the GTT alignment the display engine demands. */
	switch (obj->tiling_mode) {
	case I915_TILING_NONE:
		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
			alignment = 128 * 1024;
		else if (INTEL_INFO(dev)->gen >= 4)
			alignment = 4 * 1024;
		else
			alignment = 64 * 1024;
		break;
	case I915_TILING_X:
		/* pin() will align the object as required by fence */
		alignment = 0;
		break;
	case I915_TILING_Y:
		/* FIXME: Is this true? */
		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
		return -EINVAL;
	default:
		BUG();
	}

	/* Make the pin non-interruptible; restored on every exit path. */
	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
	if (ret)
		goto err_interruptible;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.  For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	/* NOTE(port): fencing is disabled in this KolibriOS port, so the
	 * err_unpin label below is currently unreachable. */
//	if (obj->tiling_mode != I915_TILING_NONE) {
//		ret = i915_gem_object_get_fence(obj, pipelined);
//		if (ret)
//			goto err_unpin;
//	}

	dev_priv->mm.interruptible = true;
	return 0;

err_unpin:
//	i915_gem_object_unpin(obj);
err_interruptible:
	dev_priv->mm.interruptible = true;
	return ret;
}
2327 Serge 1978
 
1979
/*
 * Program the primary plane registers for pre-Ironlake (i9xx) hardware.
 *
 * Writes pixel format, tiling (gen4+), stride and base-address registers
 * for the plane attached to @crtc so the display scans out @fb at panning
 * offset (@x, @y).  Register write order (DSPCNTR before the base/offset
 * registers, POSTING_READ last) follows the original driver and should not
 * be changed casually.
 *
 * Returns 0 on success, -EINVAL for an unknown plane or pixel format.
 */
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                 int x, int y)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    struct intel_framebuffer *intel_fb;
    struct drm_i915_gem_object *obj;
    int plane = intel_crtc->plane;
    unsigned long Start, Offset;
    u32 dspcntr;
    u32 reg;

    /* Only planes A and B exist on this hardware. */
    switch (plane) {
    case 0:
    case 1:
        break;
    default:
        DRM_ERROR("Can't update plane %d in SAREA\n", plane);
        return -EINVAL;
    }

    intel_fb = to_intel_framebuffer(fb);
    obj = intel_fb->obj;

    reg = DSPCNTR(plane);
    dspcntr = I915_READ(reg);
    /* Mask out pixel format bits in case we change it */
    dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
    switch (fb->bits_per_pixel) {
    case 8:
        dspcntr |= DISPPLANE_8BPP;
        break;
    case 16:
        /* 16bpp can be either x1r5g5b5 (depth 15) or r5g6b5 (depth 16). */
        if (fb->depth == 15)
            dspcntr |= DISPPLANE_15_16BPP;
        else
            dspcntr |= DISPPLANE_16BPP;
        break;
    case 24:
    case 32:
        dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
        break;
    default:
        DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
        return -EINVAL;
    }
    /* Tiling bit only exists on gen4 and later. */
    if (INTEL_INFO(dev)->gen >= 4) {
        if (obj->tiling_mode != I915_TILING_NONE)
            dspcntr |= DISPPLANE_TILED;
        else
            dspcntr &= ~DISPPLANE_TILED;
    }

    I915_WRITE(reg, dspcntr);

    /* Base = GTT offset of the BO; Offset = panning within the fb. */
    Start = obj->gtt_offset;
    Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);

    DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
              Start, Offset, x, y, fb->pitch);
    I915_WRITE(DSPSTRIDE(plane), fb->pitch);
    if (INTEL_INFO(dev)->gen >= 4) {
        /* gen4+: surface base and tile offset are split registers. */
        I915_WRITE(DSPSURF(plane), Start);
        I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
        I915_WRITE(DSPADDR(plane), Offset);
    } else
        /* Older parts take a single linear address. */
        I915_WRITE(DSPADDR(plane), Start + Offset);
    POSTING_READ(reg);

    return 0;
}
2051
 
2052
/*
 * Program the primary plane registers for Ironlake and later hardware.
 *
 * Like i9xx_update_plane() but with the stricter Ironlake pixel-format
 * encoding (16bpp must be depth 16; 32bpp may be depth 24 or 30) and the
 * mandatory trickle-feed disable.  Tiling support is stubbed out in this
 * port, so DISPPLANE_TILED is always cleared.
 *
 * Returns 0 on success, -EINVAL for an unknown plane or an unsupported
 * depth/bpp combination.
 */
static int ironlake_update_plane(struct drm_crtc *crtc,
                 struct drm_framebuffer *fb, int x, int y)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    struct intel_framebuffer *intel_fb;
    struct drm_i915_gem_object *obj;
    int plane = intel_crtc->plane;
    unsigned long Start, Offset;
    u32 dspcntr;
    u32 reg;

    /* Only planes A and B are valid here. */
    switch (plane) {
    case 0:
    case 1:
        break;
    default:
        DRM_ERROR("Can't update plane %d in SAREA\n", plane);
        return -EINVAL;
    }

    intel_fb = to_intel_framebuffer(fb);
    obj = intel_fb->obj;

    reg = DSPCNTR(plane);
    dspcntr = I915_READ(reg);
    /* Mask out pixel format bits in case we change it */
    dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
    switch (fb->bits_per_pixel) {
    case 8:
        dspcntr |= DISPPLANE_8BPP;
        break;
    case 16:
        /* Ironlake only accepts r5g6b5 at 16bpp. */
        if (fb->depth != 16)
            return -EINVAL;

        dspcntr |= DISPPLANE_16BPP;
        break;
    case 24:
    case 32:
        /* depth 24 -> x8r8g8b8, depth 30 -> x2r10g10b10. */
        if (fb->depth == 24)
            dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
        else if (fb->depth == 30)
            dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
        else
            return -EINVAL;
        break;
    default:
        DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
        return -EINVAL;
    }

    /* NOTE(port): tiled scanout is disabled in this port, so the tiling
     * bit is unconditionally cleared. */
//    if (obj->tiling_mode != I915_TILING_NONE)
//        dspcntr |= DISPPLANE_TILED;
//    else
        dspcntr &= ~DISPPLANE_TILED;

    /* must disable */
    dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

    I915_WRITE(reg, dspcntr);

    /* Base = GTT offset of the BO; Offset = panning within the fb. */
    Start = obj->gtt_offset;
    Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);

    DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
              Start, Offset, x, y, fb->pitch);
	I915_WRITE(DSPSTRIDE(plane), fb->pitch);
	I915_WRITE(DSPSURF(plane), Start);
	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
	I915_WRITE(DSPADDR(plane), Offset);
	POSTING_READ(reg);

    return 0;
}
2128
 
2129
/* Assume fb object is pinned & idle & fenced and just update base pointers */
2130
static int
2131
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2132
			   int x, int y, enum mode_set_atomic state)
2133
{
2134
	struct drm_device *dev = crtc->dev;
2135
	struct drm_i915_private *dev_priv = dev->dev_private;
2136
	int ret;
2137
 
2336 Serge 2138
    ENTER();
2139
 
2327 Serge 2140
	ret = dev_priv->display.update_plane(crtc, fb, x, y);
2141
	if (ret)
2336 Serge 2142
    {
2143
        LEAVE();
2327 Serge 2144
		return ret;
2336 Serge 2145
    };
2327 Serge 2146
 
2147
	intel_update_fbc(dev);
2148
	intel_increase_pllclock(crtc);
2336 Serge 2149
    LEAVE();
2327 Serge 2150
 
2151
	return 0;
2152
}
2153
 
2154
static int
2155
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2156
		    struct drm_framebuffer *old_fb)
2157
{
2158
	struct drm_device *dev = crtc->dev;
2159
	struct drm_i915_master_private *master_priv;
2160
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2336 Serge 2161
    int ret = 0;
2327 Serge 2162
 
2336 Serge 2163
    ENTER();
2164
 
2327 Serge 2165
	/* no fb bound */
2166
	if (!crtc->fb) {
2167
		DRM_ERROR("No FB bound\n");
2168
		return 0;
2169
	}
2170
 
2171
	switch (intel_crtc->plane) {
2172
	case 0:
2173
	case 1:
2174
		break;
2175
	default:
2176
		DRM_ERROR("no plane for crtc\n");
2177
		return -EINVAL;
2178
	}
2179
 
2180
	mutex_lock(&dev->struct_mutex);
2181
 
2336 Serge 2182
    ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
2183
					 LEAVE_ATOMIC_MODE_SET);
2327 Serge 2184
	if (ret) {
2185
//       i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
2186
		mutex_unlock(&dev->struct_mutex);
2187
		DRM_ERROR("failed to update base address\n");
2336 Serge 2188
        LEAVE();
2327 Serge 2189
		return ret;
2190
	}
2191
 
2336 Serge 2192
	mutex_unlock(&dev->struct_mutex);
2327 Serge 2193
 
2336 Serge 2194
 
2195
    LEAVE();
2196
    return 0;
2197
 
2330 Serge 2198
#if 0
2199
	if (!dev->primary->master)
2336 Serge 2200
    {
2201
        LEAVE();
2330 Serge 2202
		return 0;
2336 Serge 2203
    };
2327 Serge 2204
 
2330 Serge 2205
	master_priv = dev->primary->master->driver_priv;
2206
	if (!master_priv->sarea_priv)
2336 Serge 2207
    {
2208
        LEAVE();
2330 Serge 2209
		return 0;
2336 Serge 2210
    };
2327 Serge 2211
 
2330 Serge 2212
	if (intel_crtc->pipe) {
2213
		master_priv->sarea_priv->pipeB_x = x;
2214
		master_priv->sarea_priv->pipeB_y = y;
2215
	} else {
2216
		master_priv->sarea_priv->pipeA_x = x;
2217
		master_priv->sarea_priv->pipeA_y = y;
2218
	}
2336 Serge 2219
    LEAVE();
2220
 
2221
	return 0;
2330 Serge 2222
#endif
2336 Serge 2223
 
2327 Serge 2224
}
2225
 
2226
/*
 * Select the eDP PLL frequency on Ironlake for the given link @clock (kHz).
 *
 * Link clocks under 200 MHz use the 160 MHz PLL (with the documented
 * four-register workaround sequence); otherwise the 270 MHz PLL is used.
 * Ends with a posting read plus 500 us delay for the PLL to settle.
 */
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (clock < 200000) {
		u32 temp;
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		/* workaround for 160Mhz:
		   1) program 0x4600c bits 15:0 = 0x8124
		   2) program 0x46010 bit 0 = 1
		   3) program 0x46034 bit 24 = 1
		   4) program 0x64000 bit 14 = 1
		   */
		temp = I915_READ(0x4600c);
		temp &= 0xffff0000;
		I915_WRITE(0x4600c, temp | 0x8124);

		temp = I915_READ(0x46010);
		I915_WRITE(0x46010, temp | 1);

		temp = I915_READ(0x46034);
		I915_WRITE(0x46034, temp | (1 << 24));
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
	}
	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);
}
2262
 
2263
/*
 * Switch the FDI link out of training patterns into normal pixel traffic.
 *
 * Called after link training succeeds: sets the TX side (IVB uses its own
 * train-field encoding) and the RX side (CPT PCH uses a different pattern
 * mask) to "normal", enables enhanced framing, waits one idle-pattern
 * time, and on Ivy Bridge additionally enables FDI error correction.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		/* clear-then-set is intentional: it forces the field to
		 * the "no training" encoding on non-CPT PCHs. */
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
2303
 
2304
static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
2305
{
2306
	struct drm_i915_private *dev_priv = dev->dev_private;
2307
	u32 flags = I915_READ(SOUTH_CHICKEN1);
2308
 
2309
	flags |= FDI_PHASE_SYNC_OVR(pipe);
2310
	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
2311
	flags |= FDI_PHASE_SYNC_EN(pipe);
2312
	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
2313
	POSTING_READ(SOUTH_CHICKEN1);
2314
}
2315
 
2316
/* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Train the CPU-to-PCH FDI link on Ironlake: run pattern 1 until the RX
 * reports bit lock, then pattern 2 until symbol lock, polling FDI_RX_IIR
 * up to 5 times per phase.  Training failures are logged but not fatal.
 * The exact register write/delay ordering is hardware-mandated.
 */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;
    u32 reg, temp, tries;

    /* FDI needs bits from pipe & plane first */
    assert_pipe_enabled(dev_priv, pipe);
    assert_plane_enabled(dev_priv, plane);

    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
       for train result */
    reg = FDI_RX_IMR(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_RX_SYMBOL_LOCK;
    temp &= ~FDI_RX_BIT_LOCK;
    I915_WRITE(reg, temp);
    I915_READ(reg);
    udelay(150);

    /* enable CPU FDI TX and PCH FDI RX */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    /* bits 21:19 hold the lane count (lanes - 1) */
    temp &= ~(7 << 19);
    temp |= (intel_crtc->fdi_lanes - 1) << 19;
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_1;
    I915_WRITE(reg, temp | FDI_TX_ENABLE);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_1;
    I915_WRITE(reg, temp | FDI_RX_ENABLE);

    POSTING_READ(reg);
    udelay(150);

    /* Ironlake workaround, enable clock pointer after FDI enable*/
    if (HAS_PCH_IBX(dev)) {
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
               FDI_RX_PHASE_SYNC_POINTER_EN);
    }

    /* Poll for bit lock (training phase 1). */
    reg = FDI_RX_IIR(pipe);
    for (tries = 0; tries < 5; tries++) {
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if ((temp & FDI_RX_BIT_LOCK)) {
            DRM_DEBUG_KMS("FDI train 1 done.\n");
            /* write the bit back to ack/clear the status */
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
            break;
        }
    }
    if (tries == 5)
        DRM_ERROR("FDI train 1 fail!\n");

    /* Train 2 */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_2;
    I915_WRITE(reg, temp);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_2;
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    udelay(150);

    /* Poll for symbol lock (training phase 2). */
    reg = FDI_RX_IIR(pipe);
    for (tries = 0; tries < 5; tries++) {
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if (temp & FDI_RX_SYMBOL_LOCK) {
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
            DRM_DEBUG_KMS("FDI train 2 done.\n");
            break;
        }
    }
    if (tries == 5)
        DRM_ERROR("FDI train 2 fail!\n");

    DRM_DEBUG_KMS("FDI train done\n");

}
2412
 
2413
/* SNB-B FDI training voltage-swing / pre-emphasis candidates, tried in
 * order by the gen6/IVB training loops until the link locks. */
static const int snb_b_fdi_train_param [] = {
    FDI_LINK_TRAIN_400MV_0DB_SNB_B,
    FDI_LINK_TRAIN_400MV_6DB_SNB_B,
    FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
    FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
2419
 
2420
/* The FDI link training functions for SNB/Cougarpoint. */
/*
 * Train the FDI link on Sandy Bridge: for each phase (pattern 1 -> bit
 * lock, pattern 2 -> symbol lock) step through the four voltage/emphasis
 * entries in snb_b_fdi_train_param until FDI_RX_IIR reports lock.
 * CPT PCHs use their own training-pattern field encoding.
 */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    u32 reg, temp, i;

    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
       for train result */
    reg = FDI_RX_IMR(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_RX_SYMBOL_LOCK;
    temp &= ~FDI_RX_BIT_LOCK;
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    udelay(150);

    /* enable CPU FDI TX and PCH FDI RX */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    /* bits 21:19 hold the lane count (lanes - 1) */
    temp &= ~(7 << 19);
    temp |= (intel_crtc->fdi_lanes - 1) << 19;
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_1;
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
    /* SNB-B */
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
    I915_WRITE(reg, temp | FDI_TX_ENABLE);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    if (HAS_PCH_CPT(dev)) {
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
        temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
    } else {
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_1;
    }
    I915_WRITE(reg, temp | FDI_RX_ENABLE);

    POSTING_READ(reg);
    udelay(150);

    if (HAS_PCH_CPT(dev))
        cpt_phase_pointer_enable(dev, pipe);

    /* Phase 1: sweep voltage/emphasis values until bit lock. */
    for (i = 0; i < 4; i++ ) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(500);

        reg = FDI_RX_IIR(pipe);
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if (temp & FDI_RX_BIT_LOCK) {
            /* write the bit back to ack/clear the status */
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
            DRM_DEBUG_KMS("FDI train 1 done.\n");
            break;
        }
    }
    if (i == 4)
        DRM_ERROR("FDI train 1 fail!\n");

    /* Train 2 */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_2;
    if (IS_GEN6(dev)) {
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        /* SNB-B */
        temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
    }
    I915_WRITE(reg, temp);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    if (HAS_PCH_CPT(dev)) {
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
        temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
    } else {
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_2;
    }
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    udelay(150);

    /* Phase 2: sweep voltage/emphasis values until symbol lock. */
    for (i = 0; i < 4; i++ ) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(500);

        reg = FDI_RX_IIR(pipe);
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if (temp & FDI_RX_SYMBOL_LOCK) {
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
            DRM_DEBUG_KMS("FDI train 2 done.\n");
            break;
        }
    }
    if (i == 4)
        DRM_ERROR("FDI train 2 fail!\n");

    DRM_DEBUG_KMS("FDI train done.\n");
}
2543
 
2544
/* Manual link training for Ivy Bridge A0 parts */
/*
 * Like gen6_fdi_link_train() but using the IVB-specific train-field
 * encodings on the TX side and forcing manual (non-auto) training on
 * both ends.  Each phase sweeps the snb_b_fdi_train_param table until
 * FDI_RX_IIR reports bit lock (phase 1) or symbol lock (phase 2).
 */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    u32 reg, temp, i;

    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
       for train result */
    reg = FDI_RX_IMR(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_RX_SYMBOL_LOCK;
    temp &= ~FDI_RX_BIT_LOCK;
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    udelay(150);

    /* enable CPU FDI TX and PCH FDI RX */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    /* bits 21:19 hold the lane count (lanes - 1) */
    temp &= ~(7 << 19);
    temp |= (intel_crtc->fdi_lanes - 1) << 19;
    /* disable auto-training; select manual pattern 1 (IVB encoding) */
    temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
    temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
    I915_WRITE(reg, temp | FDI_TX_ENABLE);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_AUTO;
    temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
    temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
    I915_WRITE(reg, temp | FDI_RX_ENABLE);

    POSTING_READ(reg);
    udelay(150);

    if (HAS_PCH_CPT(dev))
        cpt_phase_pointer_enable(dev, pipe);

    /* Phase 1: sweep voltage/emphasis values until bit lock. */
    for (i = 0; i < 4; i++ ) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(500);

        reg = FDI_RX_IIR(pipe);
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        /* re-read in case lock arrived just after the first read */
        if (temp & FDI_RX_BIT_LOCK ||
            (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
            DRM_DEBUG_KMS("FDI train 1 done.\n");
            break;
        }
    }
    if (i == 4)
        DRM_ERROR("FDI train 1 fail!\n");

    /* Train 2 */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE_IVB;
    temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
    I915_WRITE(reg, temp);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
    temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    udelay(150);

    /* Phase 2: sweep voltage/emphasis values until symbol lock. */
    for (i = 0; i < 4; i++ ) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(500);

        reg = FDI_RX_IIR(pipe);
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if (temp & FDI_RX_SYMBOL_LOCK) {
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
            DRM_DEBUG_KMS("FDI train 2 done.\n");
            break;
        }
    }
    if (i == 4)
        DRM_ERROR("FDI train 2 fail!\n");

    DRM_DEBUG_KMS("FDI train done.\n");
}
2655
 
2656
/*
 * Enable the FDI PLLs for @crtc's pipe: program the TU size for error
 * detection, bring up the PCH FDI RX PLL (with lane count and BPC copied
 * from PIPECONF), switch RX from Rawclk to PCDclk, then make sure the CPU
 * FDI TX PLL is on.  Each step is followed by a posting read and the
 * hardware-mandated warmup delay.
 */
static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* Write the TU size bits so error detection works */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* clear lane-count (21:19) and BPC (18:16) fields */
	temp &= ~((0x7 << 19) | (0x7 << 16));
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	/* mirror the pipe's BPC setting into FDI RX */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
2696
 
2697
static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
2698
{
2699
	struct drm_i915_private *dev_priv = dev->dev_private;
2700
	u32 flags = I915_READ(SOUTH_CHICKEN1);
2701
 
2702
	flags &= ~(FDI_PHASE_SYNC_EN(pipe));
2703
	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
2704
	flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
2705
	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
2706
	POSTING_READ(SOUTH_CHICKEN1);
2707
}
2708
static void ironlake_fdi_disable(struct drm_crtc *crtc)
2709
{
2710
	struct drm_device *dev = crtc->dev;
2711
	struct drm_i915_private *dev_priv = dev->dev_private;
2712
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2713
	int pipe = intel_crtc->pipe;
2714
	u32 reg, temp;
2715
 
2716
	/* disable CPU FDI tx and PCH FDI rx */
2717
	reg = FDI_TX_CTL(pipe);
2718
	temp = I915_READ(reg);
2719
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2720
	POSTING_READ(reg);
2721
 
2722
	reg = FDI_RX_CTL(pipe);
2723
	temp = I915_READ(reg);
2724
	temp &= ~(0x7 << 16);
2725
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2726
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2727
 
2728
	POSTING_READ(reg);
2729
	udelay(100);
2730
 
2731
	/* Ironlake workaround, disable clock pointer after downing FDI */
2732
	if (HAS_PCH_IBX(dev)) {
2733
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2734
		I915_WRITE(FDI_RX_CHICKEN(pipe),
2735
			   I915_READ(FDI_RX_CHICKEN(pipe) &
2736
				     ~FDI_RX_PHASE_SYNC_POINTER_EN));
2737
	} else if (HAS_PCH_CPT(dev)) {
2738
		cpt_phase_pointer_disable(dev, pipe);
2739
	}
2740
 
2741
	/* still set train pattern 1 */
2742
	reg = FDI_TX_CTL(pipe);
2743
	temp = I915_READ(reg);
2744
	temp &= ~FDI_LINK_TRAIN_NONE;
2745
	temp |= FDI_LINK_TRAIN_PATTERN_1;
2746
	I915_WRITE(reg, temp);
2747
 
2748
	reg = FDI_RX_CTL(pipe);
2749
	temp = I915_READ(reg);
2750
	if (HAS_PCH_CPT(dev)) {
2751
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2752
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2753
	} else {
2754
		temp &= ~FDI_LINK_TRAIN_NONE;
2755
		temp |= FDI_LINK_TRAIN_PATTERN_1;
2756
	}
2757
	/* BPC in FDI rx is consistent with that in PIPECONF */
2758
	temp &= ~(0x07 << 16);
2759
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2760
	I915_WRITE(reg, temp);
2761
 
2762
	POSTING_READ(reg);
2763
	udelay(100);
2764
}
2765
 
2766
/*
2767
 * When we disable a pipe, we need to clear any pending scanline wait events
2768
 * to avoid hanging the ring, which we assume we are waiting on.
2769
 */
2770
static void intel_clear_scanline_wait(struct drm_device *dev)
2771
{
2772
	struct drm_i915_private *dev_priv = dev->dev_private;
2773
	struct intel_ring_buffer *ring;
2774
	u32 tmp;
2775
 
2776
	if (IS_GEN2(dev))
2777
		/* Can't break the hang on i8xx */
2778
		return;
2779
 
2780
	ring = LP_RING(dev_priv);
2781
	tmp = I915_READ_CTL(ring);
2782
	if (tmp & RING_WAIT)
2783
		I915_WRITE_CTL(ring, tmp);
2784
}
2785
 
2786
/*
 * Wait until any page flip queued against @crtc's framebuffer completes.
 *
 * NOTE(port): the actual wait_event() on pending_flip is commented out in
 * this KolibriOS port, so this function currently only dereferences the
 * fb object and returns immediately — it does NOT wait.
 */
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_i915_gem_object *obj;
	struct drm_i915_private *dev_priv;

	/* nothing to wait for without a bound framebuffer */
	if (crtc->fb == NULL)
		return;

	obj = to_intel_framebuffer(crtc->fb)->obj;
	dev_priv = crtc->dev->dev_private;
//	wait_event(dev_priv->pending_flip_queue,
//		   atomic_read(&obj->pending_flip) == 0);
}
2799
 
2800
static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2801
{
2802
	struct drm_device *dev = crtc->dev;
2803
	struct drm_mode_config *mode_config = &dev->mode_config;
2804
	struct intel_encoder *encoder;
2805
 
2806
	/*
2807
	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2808
	 * must be driven by its own crtc; no sharing is possible.
2809
	 */
2810
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
2811
		if (encoder->base.crtc != crtc)
2812
			continue;
2813
 
2814
		switch (encoder->type) {
2815
		case INTEL_OUTPUT_EDP:
2816
			if (!intel_encoder_is_pch_edp(&encoder->base))
2817
				return false;
2818
			continue;
2819
		}
2820
	}
2821
 
2822
	return true;
2823
}
2824
 
2825
/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	intel_enable_pch_pll(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		/* Be sure PCH DPLL SEL is set */
		temp = I915_READ(PCH_DPLL_SEL);
		if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0)
			temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
		else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0)
			temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	/* Mirror the CPU pipe timings into the PCH transcoder registers. */
	I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
	I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
	I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));

	I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
	I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));

	/* Training done; switch the FDI link to normal (pixel) traffic. */
	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) &&
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= (TRANS_DP_OUTPUT_ENABLE |
			 TRANS_DP_ENH_FRAMING);
		temp |= bpc << 9; /* same format but at 11:9 */

		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* Route the transcoder to whichever DP port sits on this crtc. */
		switch (intel_trans_dp_port_sel(crtc)) {
		case PCH_DP_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		}

		I915_WRITE(reg, temp);
	}

	intel_enable_transcoder(dev_priv, pipe);
}
2907
 
2908
/* Full power-up sequence for one crtc on Ironlake+: watermarks, LVDS port,
 * FDI PLLs, panel fitter, LUT, pipe, plane, and finally the PCH side. */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;
    u32 temp;
    bool is_pch_port;

    /* Idempotent: nothing to do if the crtc is already running. */
    if (intel_crtc->active)
        return;

    intel_crtc->active = true;
    intel_update_watermarks(dev);

    /* Make sure the PCH LVDS port is powered before the pipe comes up. */
    if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
        temp = I915_READ(PCH_LVDS);
        if ((temp & LVDS_PORT_EN) == 0)
            I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
    }

    is_pch_port = intel_crtc_driving_pch(crtc);

    if (is_pch_port)
        ironlake_fdi_pll_enable(crtc);
    else
        ironlake_fdi_disable(crtc);

    /* Enable panel fitting for LVDS */
    if (dev_priv->pch_pf_size &&
        (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
        /* Force use of hard-coded filter coefficients
         * as some pre-programmed values are broken,
         * e.g. x201.
         */
        I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
        I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
        I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
    }

    /*
     * On ILK+ LUT must be loaded before the pipe is running but with
     * clocks enabled
     */
    intel_crtc_load_lut(crtc);

    intel_enable_pipe(dev_priv, pipe, is_pch_port);
    intel_enable_plane(dev_priv, plane, pipe);

    if (is_pch_port)
        ironlake_pch_enable(crtc);

    mutex_lock(&dev->struct_mutex);
    intel_update_fbc(dev);
    mutex_unlock(&dev->struct_mutex);

    /* Cursor support is not wired up in this port. */
//    intel_crtc_update_cursor(crtc, true);
}
2967
 
2968
/* Full power-down sequence for one crtc on Ironlake+: plane, pipe, panel
 * fitter, FDI, PCH ports, transcoder, PCH PLL and finally the FDI clocks.
 * The ordering mirrors the enable path in reverse and matters to the HW. */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;
    u32 reg, temp;

    /* Idempotent: already off. */
    if (!intel_crtc->active)
        return;

    ENTER();

    intel_crtc_wait_for_pending_flips(crtc);
//    drm_vblank_off(dev, pipe);
//    intel_crtc_update_cursor(crtc, false);

    intel_disable_plane(dev_priv, plane, pipe);

    /* FBC must be torn down if it was compressing this plane. */
    if (dev_priv->cfb_plane == plane)
        intel_disable_fbc(dev);

    intel_disable_pipe(dev_priv, pipe);

    /* Disable PF */
    I915_WRITE(PF_CTL(pipe), 0);
    I915_WRITE(PF_WIN_SZ(pipe), 0);

    ironlake_fdi_disable(crtc);

    /* This is a horrible layering violation; we should be doing this in
     * the connector/encoder ->prepare instead, but we don't always have
     * enough information there about the config to know whether it will
     * actually be necessary or just cause undesired flicker.
     */
    intel_disable_pch_ports(dev_priv, pipe);

    intel_disable_transcoder(dev_priv, pipe);

    if (HAS_PCH_CPT(dev)) {
        /* disable TRANS_DP_CTL */
        reg = TRANS_DP_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
        temp |= TRANS_DP_PORT_SEL_NONE;
        I915_WRITE(reg, temp);

        /* disable DPLL_SEL */
        temp = I915_READ(PCH_DPLL_SEL);
        switch (pipe) {
        case 0:
            temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
            break;
        case 1:
            temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
            break;
        case 2:
            /* FIXME: manage transcoder PLLs? */
            temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
            break;
        default:
            BUG(); /* wtf */
        }
        I915_WRITE(PCH_DPLL_SEL, temp);
    }

    /* disable PCH DPLL */
    intel_disable_pch_pll(dev_priv, pipe);

    /* Switch from PCDclk to Rawclk */
    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    I915_WRITE(reg, temp & ~FDI_PCDCLK);

    /* Disable CPU FDI TX PLL */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

    /* POSTING_READ flushes the write before the settle delay. */
    POSTING_READ(reg);
    udelay(100);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

    /* Wait for the clocks to turn off. */
    POSTING_READ(reg);
    udelay(100);

    intel_crtc->active = false;
    intel_update_watermarks(dev);

    mutex_lock(&dev->struct_mutex);
    intel_update_fbc(dev);
    intel_clear_scanline_wait(dev);
    mutex_unlock(&dev->struct_mutex);

    LEAVE();

}
3070
 
3071
static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
3072
{
3073
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3074
    int pipe = intel_crtc->pipe;
3075
    int plane = intel_crtc->plane;
3076
 
3077
    /* XXX: When our outputs are all unaware of DPMS modes other than off
3078
     * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3079
     */
3080
    switch (mode) {
3081
    case DRM_MODE_DPMS_ON:
3082
    case DRM_MODE_DPMS_STANDBY:
3083
    case DRM_MODE_DPMS_SUSPEND:
3084
        DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
3085
        ironlake_crtc_enable(crtc);
3086
        break;
3087
 
3088
    case DRM_MODE_DPMS_OFF:
3089
        DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
3090
        ironlake_crtc_disable(crtc);
3091
        break;
3092
    }
3093
}
3094
 
3095
/* Shut down the video overlay when its pipe goes off.  Re-enabling is left
 * to userspace, which has to recompute the overlay placement anyway. */
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
	struct drm_device *dev;
	struct drm_i915_private *dev_priv;

	/* Nothing to do when enabling, or when no overlay is attached. */
	if (enable || intel_crtc->overlay == NULL)
		return;

	dev = intel_crtc->base.dev;
	dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	dev_priv->mm.interruptible = false;
//	(void) intel_overlay_switch_off(intel_crtc->overlay);
	dev_priv->mm.interruptible = true;
	mutex_unlock(&dev->struct_mutex);
}
3112
 
3113
/* Power-up sequence for a crtc on pre-Ironlake (i9xx) hardware:
 * DPLL, pipe, plane, LUT, FBC, then the overlay scaler. */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;

    /* Idempotent: already running. */
    if (intel_crtc->active)
        return;

    intel_crtc->active = true;
    intel_update_watermarks(dev);

    intel_enable_pll(dev_priv, pipe);
    intel_enable_pipe(dev_priv, pipe, false);
    intel_enable_plane(dev_priv, plane, pipe);

    intel_crtc_load_lut(crtc);
    intel_update_fbc(dev);

    /* Give the overlay scaler a chance to enable if it's on this pipe */
    intel_crtc_dpms_overlay(intel_crtc, true);
    /* Cursor support is not wired up in this port. */
//    intel_crtc_update_cursor(crtc, true);
}
3138
 
3139
/* Power-down sequence for a crtc on pre-Ironlake (i9xx) hardware:
 * overlay, FBC, plane, pipe, then the DPLL. */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;

    /* Idempotent: already off. */
    if (!intel_crtc->active)
        return;

    /* Give the overlay scaler a chance to disable if it's on this pipe */
    intel_crtc_wait_for_pending_flips(crtc);
//    drm_vblank_off(dev, pipe);
    intel_crtc_dpms_overlay(intel_crtc, false);
//    intel_crtc_update_cursor(crtc, false);

    /* FBC must be torn down if it was compressing this plane. */
    if (dev_priv->cfb_plane == plane)
        intel_disable_fbc(dev);

    intel_disable_plane(dev_priv, plane, pipe);
    intel_disable_pipe(dev_priv, pipe);
    intel_disable_pll(dev_priv, pipe);

    intel_crtc->active = false;
    intel_update_fbc(dev);
    intel_update_watermarks(dev);
    intel_clear_scanline_wait(dev);
}
3168
 
3169
static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
3170
{
3171
    /* XXX: When our outputs are all unaware of DPMS modes other than off
3172
     * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3173
     */
3174
    switch (mode) {
3175
    case DRM_MODE_DPMS_ON:
3176
    case DRM_MODE_DPMS_STANDBY:
3177
    case DRM_MODE_DPMS_SUSPEND:
3178
        i9xx_crtc_enable(crtc);
3179
        break;
3180
    case DRM_MODE_DPMS_OFF:
3181
        i9xx_crtc_disable(crtc);
3182
        break;
3183
    }
3184
}
3185
 
2330 Serge 3186
/**
3187
 * Sets the power management mode of the pipe and plane.
3188
 */
3189
static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
3190
{
3191
	struct drm_device *dev = crtc->dev;
3192
	struct drm_i915_private *dev_priv = dev->dev_private;
3193
	struct drm_i915_master_private *master_priv;
3194
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3195
	int pipe = intel_crtc->pipe;
3196
	bool enabled;
2327 Serge 3197
 
2330 Serge 3198
	if (intel_crtc->dpms_mode == mode)
3199
		return;
2327 Serge 3200
 
2330 Serge 3201
	intel_crtc->dpms_mode = mode;
2327 Serge 3202
 
2330 Serge 3203
	dev_priv->display.dpms(crtc, mode);
2327 Serge 3204
 
2340 Serge 3205
#if 0
2330 Serge 3206
	if (!dev->primary->master)
3207
		return;
2327 Serge 3208
 
2330 Serge 3209
	master_priv = dev->primary->master->driver_priv;
3210
	if (!master_priv->sarea_priv)
3211
		return;
2327 Serge 3212
 
2330 Serge 3213
	enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
2327 Serge 3214
 
2330 Serge 3215
	switch (pipe) {
3216
	case 0:
3217
		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
3218
		master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
3219
		break;
3220
	case 1:
3221
		master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
3222
		master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
3223
		break;
3224
	default:
3225
		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
3226
		break;
3227
	}
2340 Serge 3228
#endif
3229
 
2330 Serge 3230
}
2327 Serge 3231
 
2330 Serge 3232
static void intel_crtc_disable(struct drm_crtc *crtc)
3233
{
3234
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
3235
	struct drm_device *dev = crtc->dev;
2327 Serge 3236
 
2330 Serge 3237
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
2327 Serge 3238
 
2330 Serge 3239
	if (crtc->fb) {
3240
		mutex_lock(&dev->struct_mutex);
3241
//		i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
3242
		mutex_unlock(&dev->struct_mutex);
3243
	}
3244
}
2327 Serge 3245
 
2330 Serge 3246
/* Prepare for a mode set.
 *
 * Note we could be a lot smarter here.  We need to figure out which outputs
 * will be enabled, which disabled (in short, how the config will change)
 * and perform the minimum necessary steps to accomplish that, e.g. updating
 * watermarks, FBC configuration, making sure PLLs are programmed correctly,
 * panel fitting is in the proper state, etc.
 */
static void i9xx_crtc_prepare(struct drm_crtc *crtc)
{
	/* Simply shut the pipe down; ->commit brings it back up. */
	i9xx_crtc_disable(crtc);
}
2327 Serge 3258
 
2330 Serge 3259
/* Finish a mode set: bring the freshly-configured i9xx pipe back up. */
static void i9xx_crtc_commit(struct drm_crtc *crtc)
{
	i9xx_crtc_enable(crtc);
}
2327 Serge 3263
 
2330 Serge 3264
/* Prepare for a mode set on Ironlake+: shut the pipe down first. */
static void ironlake_crtc_prepare(struct drm_crtc *crtc)
{
	ironlake_crtc_disable(crtc);
}
2327 Serge 3268
 
2330 Serge 3269
/* Finish a mode set: bring the freshly-configured Ironlake+ pipe back up. */
static void ironlake_crtc_commit(struct drm_crtc *crtc)
{
	ironlake_crtc_enable(crtc);
}
2327 Serge 3273
 
2330 Serge 3274
void intel_encoder_prepare (struct drm_encoder *encoder)
3275
{
3276
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3277
	/* lvds has its own version of prepare see intel_lvds_prepare */
3278
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
3279
}
2327 Serge 3280
 
2330 Serge 3281
void intel_encoder_commit (struct drm_encoder *encoder)
3282
{
3283
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3284
	/* lvds has its own version of commit see intel_lvds_commit */
3285
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
3286
}
2327 Serge 3287
 
2330 Serge 3288
/* Tear down the DRM core encoder state, then free our wrapper struct. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_enc = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_enc);
}
3295
 
3296
static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3297
				  struct drm_display_mode *mode,
3298
				  struct drm_display_mode *adjusted_mode)
3299
{
3300
	struct drm_device *dev = crtc->dev;
3301
 
3302
	if (HAS_PCH_SPLIT(dev)) {
3303
		/* FDI link clock is fixed at 2.7G */
3304
		if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
3305
			return false;
3306
	}
3307
 
3308
	/* XXX some encoders set the crtcinfo, others don't.
3309
	 * Obviously we need some form of conflict resolution here...
3310
	 */
3311
	if (adjusted_mode->crtc_htotal == 0)
3312
		drm_mode_set_crtcinfo(adjusted_mode, 0);
3313
 
3314
	return true;
3315
}
3316
 
2327 Serge 3317
/* i945: display core clock is a fixed 400 MHz (returned in kHz). */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	(void)dev; /* fixed clock; the device is not consulted */
	return 400000;
}
3321
 
3322
/* i915: display core clock is a fixed 333 MHz (returned in kHz). */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	(void)dev; /* fixed clock; the device is not consulted */
	return 333000;
}
3326
 
3327
/* Other i9xx parts: display core clock is a fixed 200 MHz (in kHz). */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	(void)dev; /* fixed clock; the device is not consulted */
	return 200000;
}
3331
 
3332
static int i915gm_get_display_clock_speed(struct drm_device *dev)
3333
{
3334
	u16 gcfgc = 0;
3335
 
3336
	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3337
 
3338
	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
3339
		return 133000;
3340
	else {
3341
		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
3342
		case GC_DISPLAY_CLOCK_333_MHZ:
3343
			return 333000;
3344
		default:
3345
		case GC_DISPLAY_CLOCK_190_200_MHZ:
3346
			return 190000;
3347
		}
3348
	}
3349
}
3350
 
3351
/* i865: display core clock is a fixed 266 MHz (returned in kHz). */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	(void)dev; /* fixed clock; the device is not consulted */
	return 266000;
}
3355
 
3356
/* i855: decode the display clock from the HPLL clock-control field (kHz).
 * NOTE(review): hpllcc is initialized to 0 and never read from the actual
 * config register, so the switch always decodes the value 0 — the comment
 * below documents this as a deliberate "assume high speed" shortcut. */
static int i855_get_display_clock_speed(struct drm_device *dev)
{
	u16 hpllcc = 0;
	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133000;
	}

	/* Shouldn't happen */
	return 0;
}
3375
 
3376
/* i830: display core clock is a fixed 133 MHz (returned in kHz). */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
	(void)dev; /* fixed clock; the device is not consulted */
	return 133000;
}
3380
 
3381
/* M/N divider values programmed for the FDI/DP link.
 * tu is the transfer unit size (set to 64 by ironlake_compute_m_n);
 * gmch_m/gmch_n is the data ratio, link_m/link_n the link ratio. */
struct fdi_m_n {
    u32        tu;
    u32        gmch_m;
    u32        gmch_n;
    u32        link_m;
    u32        link_n;
};
3388
 
3389
/* Shift both terms of an M/N ratio right together (preserving the ratio,
 * at reduced precision) until each fits in a 24-bit register field. */
static void
fdi_reduce_ratio(u32 *num, u32 *den)
{
	while (*num > 0xffffff || *den > 0xffffff) {
		*num >>= 1;
		*den >>= 1;
	}
}
3397
 
3398
/* Compute the GMCH data ratio and link ratio for driving an FDI/DP link
 * at the given pixel clock over nlanes lanes, reducing both ratios to fit
 * the 24-bit M/N register fields. */
static void
ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
		     int link_clock, struct fdi_m_n *m_n)
{
	m_n->tu = 64; /* default size */

	/* BUG_ON(pixel_clock > INT_MAX / 36); */
	/* Data ratio: payload bits per pixel clock vs. total link capacity
	 * (link clock * lanes * 8 bits per symbol clock). */
	m_n->gmch_m = bits_per_pixel * pixel_clock;
	m_n->gmch_n = link_clock * nlanes * 8;
	fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);

	/* Link ratio: pixel clock vs. link clock. */
	m_n->link_m = pixel_clock;
	m_n->link_n = link_clock;
	fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
3413
 
3414
 
3415
/* Per-platform FIFO/watermark parameters consumed by intel_calculate_wm(). */
struct intel_watermark_params {
    unsigned long fifo_size;       /* total FIFO size available */
    unsigned long max_wm;          /* highest watermark level allowed */
    unsigned long default_wm;      /* fallback when the computed level is <= 0 */
    unsigned long guard_size;      /* extra entries kept in reserve */
    unsigned long cacheline_size;  /* FIFO line (fetch chunk) size */
};
3422
 
3423
/* Pineview has different values for various configs */
3424
static const struct intel_watermark_params pineview_display_wm = {
3425
    PINEVIEW_DISPLAY_FIFO,
3426
    PINEVIEW_MAX_WM,
3427
    PINEVIEW_DFT_WM,
3428
    PINEVIEW_GUARD_WM,
3429
    PINEVIEW_FIFO_LINE_SIZE
3430
};
3431
static const struct intel_watermark_params pineview_display_hplloff_wm = {
3432
    PINEVIEW_DISPLAY_FIFO,
3433
    PINEVIEW_MAX_WM,
3434
    PINEVIEW_DFT_HPLLOFF_WM,
3435
    PINEVIEW_GUARD_WM,
3436
    PINEVIEW_FIFO_LINE_SIZE
3437
};
3438
static const struct intel_watermark_params pineview_cursor_wm = {
3439
    PINEVIEW_CURSOR_FIFO,
3440
    PINEVIEW_CURSOR_MAX_WM,
3441
    PINEVIEW_CURSOR_DFT_WM,
3442
    PINEVIEW_CURSOR_GUARD_WM,
3443
    PINEVIEW_FIFO_LINE_SIZE,
3444
};
3445
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
3446
    PINEVIEW_CURSOR_FIFO,
3447
    PINEVIEW_CURSOR_MAX_WM,
3448
    PINEVIEW_CURSOR_DFT_WM,
3449
    PINEVIEW_CURSOR_GUARD_WM,
3450
    PINEVIEW_FIFO_LINE_SIZE
3451
};
3452
static const struct intel_watermark_params g4x_wm_info = {
3453
    G4X_FIFO_SIZE,
3454
    G4X_MAX_WM,
3455
    G4X_MAX_WM,
3456
    2,
3457
    G4X_FIFO_LINE_SIZE,
3458
};
3459
static const struct intel_watermark_params g4x_cursor_wm_info = {
3460
    I965_CURSOR_FIFO,
3461
    I965_CURSOR_MAX_WM,
3462
    I965_CURSOR_DFT_WM,
3463
    2,
3464
    G4X_FIFO_LINE_SIZE,
3465
};
3466
static const struct intel_watermark_params i965_cursor_wm_info = {
3467
    I965_CURSOR_FIFO,
3468
    I965_CURSOR_MAX_WM,
3469
    I965_CURSOR_DFT_WM,
3470
    2,
3471
    I915_FIFO_LINE_SIZE,
3472
};
3473
static const struct intel_watermark_params i945_wm_info = {
3474
    I945_FIFO_SIZE,
3475
    I915_MAX_WM,
3476
    1,
3477
    2,
3478
    I915_FIFO_LINE_SIZE
3479
};
3480
static const struct intel_watermark_params i915_wm_info = {
3481
    I915_FIFO_SIZE,
3482
    I915_MAX_WM,
3483
    1,
3484
    2,
3485
    I915_FIFO_LINE_SIZE
3486
};
3487
static const struct intel_watermark_params i855_wm_info = {
3488
    I855GM_FIFO_SIZE,
3489
    I915_MAX_WM,
3490
    1,
3491
    2,
3492
    I830_FIFO_LINE_SIZE
3493
};
3494
static const struct intel_watermark_params i830_wm_info = {
3495
    I830_FIFO_SIZE,
3496
    I915_MAX_WM,
3497
    1,
3498
    2,
3499
    I830_FIFO_LINE_SIZE
3500
};
3501
 
3502
static const struct intel_watermark_params ironlake_display_wm_info = {
3503
    ILK_DISPLAY_FIFO,
3504
    ILK_DISPLAY_MAXWM,
3505
    ILK_DISPLAY_DFTWM,
3506
    2,
3507
    ILK_FIFO_LINE_SIZE
3508
};
3509
static const struct intel_watermark_params ironlake_cursor_wm_info = {
3510
    ILK_CURSOR_FIFO,
3511
    ILK_CURSOR_MAXWM,
3512
    ILK_CURSOR_DFTWM,
3513
    2,
3514
    ILK_FIFO_LINE_SIZE
3515
};
3516
static const struct intel_watermark_params ironlake_display_srwm_info = {
3517
    ILK_DISPLAY_SR_FIFO,
3518
    ILK_DISPLAY_MAX_SRWM,
3519
    ILK_DISPLAY_DFT_SRWM,
3520
    2,
3521
    ILK_FIFO_LINE_SIZE
3522
};
3523
static const struct intel_watermark_params ironlake_cursor_srwm_info = {
3524
    ILK_CURSOR_SR_FIFO,
3525
    ILK_CURSOR_MAX_SRWM,
3526
    ILK_CURSOR_DFT_SRWM,
3527
    2,
3528
    ILK_FIFO_LINE_SIZE
3529
};
3530
 
3531
static const struct intel_watermark_params sandybridge_display_wm_info = {
3532
    SNB_DISPLAY_FIFO,
3533
    SNB_DISPLAY_MAXWM,
3534
    SNB_DISPLAY_DFTWM,
3535
    2,
3536
    SNB_FIFO_LINE_SIZE
3537
};
3538
static const struct intel_watermark_params sandybridge_cursor_wm_info = {
3539
    SNB_CURSOR_FIFO,
3540
    SNB_CURSOR_MAXWM,
3541
    SNB_CURSOR_DFTWM,
3542
    2,
3543
    SNB_FIFO_LINE_SIZE
3544
};
3545
static const struct intel_watermark_params sandybridge_display_srwm_info = {
3546
    SNB_DISPLAY_SR_FIFO,
3547
    SNB_DISPLAY_MAX_SRWM,
3548
    SNB_DISPLAY_DFT_SRWM,
3549
    2,
3550
    SNB_FIFO_LINE_SIZE
3551
};
3552
static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
3553
    SNB_CURSOR_SR_FIFO,
3554
    SNB_CURSOR_MAX_SRWM,
3555
    SNB_CURSOR_DFT_SRWM,
3556
    2,
3557
    SNB_FIFO_LINE_SIZE
3558
};
3559
 
3560
 
3561
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO actually allocated to this plane
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again).  Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size.  When it reaches the watermark level, it'll start
 * fetching FIFO line sized based chunks from memory until the FIFO fills
 * past the watermark point.  If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
                    const struct intel_watermark_params *wm,
                    int fifo_size,
                    int pixel_size,
                    unsigned long latency_ns)
{
    long entries_required, wm_size;

    /*
     * Note: we need to make sure we don't overflow for various clock &
     * latency values.
     * clocks go from a few thousand to several hundred thousand.
     * latency is usually a few thousand
     */
    entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
        1000;
    /* Convert bytes drained during the latency window to FIFO lines. */
    entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

    DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

    wm_size = fifo_size - (entries_required + wm->guard_size);

    DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

    /* Don't promote wm_size to unsigned... */
    if (wm_size > (long)wm->max_wm)
        wm_size = wm->max_wm;
    if (wm_size <= 0)
        wm_size = wm->default_wm;
    return wm_size;
}
3610
 
3611
/* One row of the CxSR (self-refresh) latency table: keyed by platform
 * variant, memory type and FSB/memory frequencies; the remaining fields
 * are the latency values programmed for the matching configuration. */
struct cxsr_latency {
    int is_desktop;                      /* 1 = desktop part, 0 = mobile */
    int is_ddr3;                         /* 1 = DDR3, 0 = DDR2 */
    unsigned long fsb_freq;              /* FSB frequency (MHz) */
    unsigned long mem_freq;              /* memory frequency (MHz) */
    unsigned long display_sr;            /* display self-refresh latency */
    unsigned long display_hpll_disable;  /* display latency w/ HPLL off */
    unsigned long cursor_sr;             /* cursor self-refresh latency */
    unsigned long cursor_hpll_disable;   /* cursor latency w/ HPLL off */
};
3621
 
3622
/* CxSR latency values per (desktop?, ddr3?, fsb, mem) configuration;
 * looked up by intel_get_cxsr_latency().  Field order matches
 * struct cxsr_latency above. */
static const struct cxsr_latency cxsr_latency_table[] = {
    {1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
    {1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
    {1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
    {1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
    {1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

    {1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
    {1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
    {1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
    {1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
    {1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

    {1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
    {1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
    {1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
    {1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
    {1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

    {0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
    {0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
    {0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
    {0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
    {0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

    {0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
    {0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
    {0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
    {0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
    {0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

    {0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
    {0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
    {0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
    {0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
    {0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
3659
 
3660
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
3661
                             int is_ddr3,
3662
                             int fsb,
3663
                             int mem)
3664
{
3665
    const struct cxsr_latency *latency;
3666
    int i;
3667
 
3668
    if (fsb == 0 || mem == 0)
3669
        return NULL;
3670
 
3671
    for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
3672
        latency = &cxsr_latency_table[i];
3673
        if (is_desktop == latency->is_desktop &&
3674
            is_ddr3 == latency->is_ddr3 &&
3675
            fsb == latency->fsb_freq && mem == latency->mem_freq)
3676
            return latency;
3677
    }
3678
 
3679
    DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
3680
 
3681
    return NULL;
3682
}
3683
 
3684
/* Turn off Pineview self-refresh (CxSR) by clearing its enable bit
 * in the DSPFW3 watermark register. */
static void pineview_disable_cxsr(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    /* deactivate cxsr */
    I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}
3691
 
3692
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000; /* 5 us, in nanoseconds */
3707
 
3708
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
3709
{
3710
	struct drm_i915_private *dev_priv = dev->dev_private;
3711
	uint32_t dsparb = I915_READ(DSPARB);
3712
	int size;
3713
 
3714
	size = dsparb & 0x7f;
3715
	if (plane)
3716
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
3717
 
3718
	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3719
		      plane ? "B" : "A", size);
3720
 
3721
	return size;
3722
}
3723
 
3724
/* Read DSPARB and return the FIFO allocation (in cachelines) for the
 * given plane on i85x parts; fields here are 9 bits wide. */
static int i85x_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
3740
 
3741
static int i845_get_fifo_size(struct drm_device *dev, int plane)
3742
{
3743
	struct drm_i915_private *dev_priv = dev->dev_private;
3744
	uint32_t dsparb = I915_READ(DSPARB);
3745
	int size;
3746
 
3747
	size = dsparb & 0x7f;
3748
	size >>= 2; /* Convert to cachelines */
3749
 
3750
	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3751
		      plane ? "B" : "A",
3752
		      size);
3753
 
3754
	return size;
3755
}
3756
 
3757
/* Read DSPARB and return the FIFO allocation (in cachelines) on i830.
 * Like i845, a single allocation field is used for either plane. */
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
3771
 
3772
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
3773
{
3774
    struct drm_crtc *crtc, *enabled = NULL;
3775
 
3776
    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3777
        if (crtc->enabled && crtc->fb) {
3778
            if (enabled)
3779
                return NULL;
3780
            enabled = crtc;
3781
        }
3782
    }
3783
 
3784
    return enabled;
3785
}
3786
 
3787
/*
 * Program the Pineview self-refresh (CxSR) watermarks.
 *
 * Latency values are looked up from a FSB/memory-frequency table; if the
 * configuration is unknown, or more than one pipe is active, CxSR is
 * disabled instead.
 */
static void pineview_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		/* No latency data for this FSB/memory combo: play safe. */
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}

	/* Self-refresh is only usable with a single active pipe. */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		int clock = crtc->mode.clock;
		int pixel_size = crtc->fb->bits_per_pixel / 8;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr — only after all watermark fields are set */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}
3855
 
3856
/*
 * Compute the WM0 (normal, non-self-refresh) plane and cursor watermarks
 * for @plane.  Results are stored through @plane_wm / @cursor_wm.
 *
 * Returns true when the plane is active and real values were computed;
 * returns false (leaving just the guard sizes) when the plane is idle.
 */
static bool g4x_compute_wm0(struct drm_device *dev,
                int plane,
                const struct intel_watermark_params *display,
                int display_latency_ns,
                const struct intel_watermark_params *cursor,
                int cursor_latency_ns,
                int *plane_wm,
                int *cursor_wm)
{
    struct drm_crtc *crtc;
    int htotal, hdisplay, clock, pixel_size;
    int line_time_us, line_count;
    int entries, tlb_miss;

    crtc = intel_get_crtc_for_plane(dev, plane);
    if (crtc->fb == NULL || !crtc->enabled) {
        /* Idle plane: fall back to the minimal guard values. */
        *cursor_wm = cursor->guard_size;
        *plane_wm = display->guard_size;
        return false;
    }

    htotal = crtc->mode.htotal;
    hdisplay = crtc->mode.hdisplay;
    clock = crtc->mode.clock;
    pixel_size = crtc->fb->bits_per_pixel / 8;

    /* Use the small buffer method to calculate plane watermark */
    entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
    /* Pad for a potential TLB miss when the FIFO outruns the scanline. */
    tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
    if (tlb_miss > 0)
        entries += tlb_miss;
    entries = DIV_ROUND_UP(entries, display->cacheline_size);
    *plane_wm = entries + display->guard_size;
    if (*plane_wm > (int)display->max_wm)
        *plane_wm = display->max_wm;

    /* Use the large buffer method to calculate cursor watermark */
    line_time_us = ((htotal * 1000) / clock);
    line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
    entries = line_count * 64 * pixel_size;
    tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
    if (tlb_miss > 0)
        entries += tlb_miss;
    entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
    *cursor_wm = entries + cursor->guard_size;
    if (*cursor_wm > (int)cursor->max_wm)
        *cursor_wm = (int)cursor->max_wm;

    return true;
}
3906
 
3907
/*
3908
 * Check the wm result.
3909
 *
3910
 * If any calculated watermark values is larger than the maximum value that
3911
 * can be programmed into the associated watermark register, that watermark
3912
 * must be disabled.
3913
 */
3914
static bool g4x_check_srwm(struct drm_device *dev,
3915
			   int display_wm, int cursor_wm,
3916
			   const struct intel_watermark_params *display,
3917
			   const struct intel_watermark_params *cursor)
3918
{
3919
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
3920
		      display_wm, cursor_wm);
3921
 
3922
	if (display_wm > display->max_wm) {
3923
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
3924
			      display_wm, display->max_wm);
3925
		return false;
3926
	}
3927
 
3928
	if (cursor_wm > cursor->max_wm) {
3929
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
3930
			      cursor_wm, cursor->max_wm);
3931
		return false;
3932
	}
3933
 
3934
	if (!(display_wm || cursor_wm)) {
3935
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
3936
		return false;
3937
	}
3938
 
3939
	return true;
3940
}
3941
 
3942
/*
 * Compute the self-refresh plane and cursor watermarks for @plane on G4x.
 * Returns the result of g4x_check_srwm(); false disables self-refresh.
 */
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		/* Zero latency means no usable SR data for this config. */
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	/* +1000/1000 rounds the latency up to whole scanlines. */
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * 64;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
3987
 
3988
/* True iff exactly one bit is set in @mask, i.e. exactly one plane enabled. */
#define single_plane_enabled(mask) is_power_of_2(mask)
3989
 
3990
/*
 * Program WM0 watermarks for both planes on G4x, and the self-refresh
 * watermarks when exactly one plane is enabled.
 */
static void g4x_update_wm(struct drm_device *dev)
{
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;	/* bitmask of active planes: bit0=A, bit1=B */

	if (g4x_compute_wm0(dev, 0,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	/* Self-refresh only when a single plane is active and the SR
	 * watermarks validate; otherwise make sure it is turned off. */
	plane_sr = cursor_sr = 0;
	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	else
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
4040
 
4041
/*
 * Program watermarks on i965-class hardware.  Non-SR watermarks are fixed
 * at 8 (hardware limitation noted below); only the self-refresh values are
 * computed, and only when a single pipe is active.
 */
static void i965_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;		/* minimal safe SR watermark fallback */
	int cursor_sr = 16;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		int clock = crtc->mode.clock;
		int htotal = crtc->mode.htotal;
		int hdisplay = crtc->mode.hdisplay;
		int pixel_size = crtc->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = ((htotal * 1000) / clock);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;	/* register field width */
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		/* Cursor uses a fixed 64-pixel-wide surface. */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * 64;
		entries = DIV_ROUND_UP(entries,
					  i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
4105
 
4106
/*
 * Program FIFO watermarks for the i8xx/i9xx family (FW_BLC registers).
 * Computes per-plane WMs, then — with exactly one active plane and FW_BLC
 * self-refresh support — a self-refresh watermark as well.
 */
static void i9xx_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	/* 'enabled' tracks the single active CRTC, NULL when 0 or 2 active. */
	struct drm_crtc *crtc, *enabled = NULL;

	/* Pick the watermark table matching this chip generation. */
	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i855_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (crtc->enabled && crtc->fb) {
		planea_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		enabled = crtc;
	} else
		planea_wm = fifo_size - wm_info->guard_size;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (crtc->enabled && crtc->fb) {
		planeb_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;	/* both pipes active: no SR */
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	if (IS_I945G(dev) || IS_I945GM(dev))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
	else if (IS_I915GM(dev))
		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		int clock = enabled->mode.clock;
		int htotal = enabled->mode.htotal;
		int hdisplay = enabled->mode.hdisplay;
		int pixel_size = enabled->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = (htotal * 1000) / clock;

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	/* Re-enable self-refresh now that the watermarks are in place. */
	if (HAS_FW_BLC(dev)) {
		if (enabled) {
			if (IS_I945G(dev) || IS_I945GM(dev))
				I915_WRITE(FW_BLC_SELF,
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
			else if (IS_I915GM(dev))
				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
			DRM_DEBUG_KMS("memory self refresh enabled\n");
		} else
			DRM_DEBUG_KMS("memory self refresh disabled\n");
	}
}
4216
 
4217
static void i830_update_wm(struct drm_device *dev)
4218
{
4219
	struct drm_i915_private *dev_priv = dev->dev_private;
4220
	struct drm_crtc *crtc;
4221
	uint32_t fwater_lo;
4222
	int planea_wm;
4223
 
4224
	crtc = single_enabled_crtc(dev);
4225
	if (crtc == NULL)
4226
		return;
4227
 
4228
	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
4229
				       dev_priv->display.get_fifo_size(dev, 0),
4230
				       crtc->fb->bits_per_pixel / 8,
4231
				       latency_ns);
4232
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
4233
	fwater_lo |= (3<<8) | planea_wm;
4234
 
4235
	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
4236
 
4237
	I915_WRITE(FW_BLC, fwater_lo);
4238
}
4239
 
4240
/* Ironlake LP0 latencies, passed as the *_latency_ns arguments to
 * g4x_compute_wm0() in ironlake_update_wm(). */
#define ILK_LP0_PLANE_LATENCY		700
#define ILK_LP0_CURSOR_LATENCY		1300
4242
 
4243
/*
4244
 * Check the wm result.
4245
 *
4246
 * If any calculated watermark values is larger than the maximum value that
4247
 * can be programmed into the associated watermark register, that watermark
4248
 * must be disabled.
4249
 */
4250
static bool ironlake_check_srwm(struct drm_device *dev, int level,
4251
				int fbc_wm, int display_wm, int cursor_wm,
4252
				const struct intel_watermark_params *display,
4253
				const struct intel_watermark_params *cursor)
4254
{
4255
	struct drm_i915_private *dev_priv = dev->dev_private;
4256
 
4257
	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
4258
		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
4259
 
4260
	if (fbc_wm > SNB_FBC_MAX_SRWM) {
4261
		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
4262
			      fbc_wm, SNB_FBC_MAX_SRWM, level);
4263
 
4264
		/* fbc has it's own way to disable FBC WM */
4265
		I915_WRITE(DISP_ARB_CTL,
4266
			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
4267
		return false;
4268
	}
4269
 
4270
	if (display_wm > display->max_wm) {
4271
		DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
4272
			      display_wm, SNB_DISPLAY_MAX_SRWM, level);
4273
		return false;
4274
	}
4275
 
4276
	if (cursor_wm > cursor->max_wm) {
4277
		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
4278
			      cursor_wm, SNB_CURSOR_MAX_SRWM, level);
4279
		return false;
4280
	}
4281
 
4282
	if (!(fbc_wm || display_wm || cursor_wm)) {
4283
		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
4284
		return false;
4285
	}
4286
 
4287
	return true;
4288
}
4289
 
4290
/*
4291
 * Compute watermark values of WM[1-3],
4292
 */
4293
/*
 * Compute the WM[1-3] self-refresh watermarks (display, FBC, cursor) for
 * @plane at the given latency, storing results through the out pointers.
 * Returns the result of ironlake_check_srwm(); false means this level
 * (and higher ones) must stay disabled.
 */
static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
                  int latency_ns,
                  const struct intel_watermark_params *display,
                  const struct intel_watermark_params *cursor,
                  int *fbc_wm, int *display_wm, int *cursor_wm)
{
    struct drm_crtc *crtc;
    unsigned long line_time_us;
    int hdisplay, htotal, pixel_size, clock;
    int line_count, line_size;
    int small, large;
    int entries;

    if (!latency_ns) {
        /* No latency data for this level: report all-zero watermarks. */
        *fbc_wm = *display_wm = *cursor_wm = 0;
        return false;
    }

    crtc = intel_get_crtc_for_plane(dev, plane);
    hdisplay = crtc->mode.hdisplay;
    htotal = crtc->mode.htotal;
    clock = crtc->mode.clock;
    pixel_size = crtc->fb->bits_per_pixel / 8;

    line_time_us = (htotal * 1000) / clock;
    /* +1000/1000 rounds the latency up to whole scanlines. */
    line_count = (latency_ns / line_time_us + 1000) / 1000;
    line_size = hdisplay * pixel_size;

    /* Use the minimum of the small and large buffer method for primary */
    small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
    large = line_count * line_size;

    entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
    *display_wm = entries + display->guard_size;

    /*
     * Spec says:
     * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
     */
    *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;

    /* calculate the self-refresh watermark for display cursor */
    entries = line_count * pixel_size * 64;
    entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
    *cursor_wm = entries + cursor->guard_size;

    return ironlake_check_srwm(dev, level,
                   *fbc_wm, *display_wm, *cursor_wm,
                   display, cursor);
}
4343
 
4344
/*
 * Program Ironlake watermarks: WM0 per pipe, then WM1/WM2 self-refresh
 * levels when exactly one plane is active.  Levels must be programmed in
 * ascending order, and a failed level aborts the higher ones.
 */
static void ironlake_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;	/* bitmask of active pipes: bit0=A, bit1=B */

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEA_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEB_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled))
		return;
	enabled = ffs(enabled) - 1;	/* now holds the active plane index */

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   ILK_READ_WM1_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   ILK_READ_WM2_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/*
	 * WM3 is unsupported on ILK, probably because we don't have latency
	 * data for that power state
	 */
}
4426
 
4427
static void sandybridge_update_wm(struct drm_device *dev)
4428
{
4429
	struct drm_i915_private *dev_priv = dev->dev_private;
4430
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
4431
	int fbc_wm, plane_wm, cursor_wm;
4432
	unsigned int enabled;
4433
 
2336 Serge 4434
    ENTER();
4435
 
2327 Serge 4436
	enabled = 0;
4437
	if (g4x_compute_wm0(dev, 0,
4438
			    &sandybridge_display_wm_info, latency,
4439
			    &sandybridge_cursor_wm_info, latency,
4440
			    &plane_wm, &cursor_wm)) {
4441
		I915_WRITE(WM0_PIPEA_ILK,
4442
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4443
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
4444
			      " plane %d, " "cursor: %d\n",
4445
			      plane_wm, cursor_wm);
4446
		enabled |= 1;
4447
	}
4448
 
4449
	if (g4x_compute_wm0(dev, 1,
4450
			    &sandybridge_display_wm_info, latency,
4451
			    &sandybridge_cursor_wm_info, latency,
4452
			    &plane_wm, &cursor_wm)) {
4453
		I915_WRITE(WM0_PIPEB_ILK,
4454
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4455
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
4456
			      " plane %d, cursor: %d\n",
4457
			      plane_wm, cursor_wm);
4458
		enabled |= 2;
4459
	}
4460
 
4461
	/*
4462
	 * Calculate and update the self-refresh watermark only when one
4463
	 * display plane is used.
4464
	 *
4465
	 * SNB support 3 levels of watermark.
4466
	 *
4467
	 * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
4468
	 * and disabled in the descending order
4469
	 *
4470
	 */
4471
	I915_WRITE(WM3_LP_ILK, 0);
4472
	I915_WRITE(WM2_LP_ILK, 0);
4473
	I915_WRITE(WM1_LP_ILK, 0);
4474
 
4475
	if (!single_plane_enabled(enabled))
2336 Serge 4476
    {
4477
        LEAVE();
2327 Serge 4478
		return;
2336 Serge 4479
    };
4480
 
2327 Serge 4481
	enabled = ffs(enabled) - 1;
4482
 
2336 Serge 4483
    dbgprintf("compute wm1\n");
4484
 
2327 Serge 4485
	/* WM1 */
4486
	if (!ironlake_compute_srwm(dev, 1, enabled,
4487
				   SNB_READ_WM1_LATENCY() * 500,
4488
				   &sandybridge_display_srwm_info,
4489
				   &sandybridge_cursor_srwm_info,
4490
				   &fbc_wm, &plane_wm, &cursor_wm))
4491
		return;
4492
 
4493
	I915_WRITE(WM1_LP_ILK,
4494
		   WM1_LP_SR_EN |
4495
		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4496
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
4497
		   (plane_wm << WM1_LP_SR_SHIFT) |
4498
		   cursor_wm);
4499
 
2336 Serge 4500
    dbgprintf("compute wm2\n");
4501
 
2327 Serge 4502
	/* WM2 */
4503
	if (!ironlake_compute_srwm(dev, 2, enabled,
4504
				   SNB_READ_WM2_LATENCY() * 500,
4505
				   &sandybridge_display_srwm_info,
4506
				   &sandybridge_cursor_srwm_info,
4507
				   &fbc_wm, &plane_wm, &cursor_wm))
4508
		return;
4509
 
4510
	I915_WRITE(WM2_LP_ILK,
4511
		   WM2_LP_EN |
4512
		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4513
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
4514
		   (plane_wm << WM1_LP_SR_SHIFT) |
4515
		   cursor_wm);
4516
 
2336 Serge 4517
    dbgprintf("compute wm3\n");
4518
 
2327 Serge 4519
	/* WM3 */
4520
	if (!ironlake_compute_srwm(dev, 3, enabled,
4521
				   SNB_READ_WM3_LATENCY() * 500,
4522
				   &sandybridge_display_srwm_info,
4523
				   &sandybridge_cursor_srwm_info,
4524
				   &fbc_wm, &plane_wm, &cursor_wm))
4525
		return;
4526
 
4527
	I915_WRITE(WM3_LP_ILK,
4528
		   WM3_LP_EN |
4529
		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4530
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
4531
		   (plane_wm << WM1_LP_SR_SHIFT) |
4532
		   cursor_wm);
2336 Serge 4533
 
4534
    LEAVE();
4535
 
2327 Serge 4536
}
4537
 
4538
/**
4539
 * intel_update_watermarks - update FIFO watermark values based on current modes
4540
 *
4541
 * Calculate watermark values for the various WM regs based on current mode
4542
 * and plane configuration.
4543
 *
4544
 * There are several cases to deal with here:
4545
 *   - normal (i.e. non-self-refresh)
4546
 *   - self-refresh (SR) mode
4547
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
4548
 *   - lines are small relative to FIFO size (buffer can hold more than 2
4549
 *     lines), so need to account for TLB latency
4550
 *
4551
 *   The normal calculation is:
4552
 *     watermark = dotclock * bytes per pixel * latency
4553
 *   where latency is platform & configuration dependent (we assume pessimal
4554
 *   values here).
4555
 *
4556
 *   The SR calculation is:
4557
 *     watermark = (trunc(latency/line time)+1) * surface width *
4558
 *       bytes per pixel
4559
 *   where
4560
 *     line time = htotal / dotclock
4561
 *     surface width = hdisplay for normal plane and 64 for cursor
4562
 *   and latency is assumed to be high, as above.
4563
 *
4564
 * The final value programmed to the register should always be rounded up,
4565
 * and include an extra 2 entries to account for clock crossings.
4566
 *
4567
 * We don't use the sprite, so we can ignore that.  And on Crestline we have
4568
 * to set the non-SR watermarks to 8.
4569
 */
4570
/* Dispatch to the chip-specific watermark update hook, if one is set. */
static void intel_update_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
    ENTER();
	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(dev);
    LEAVE();
}
4578
 
4579
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4580
{
4581
	return dev_priv->lvds_use_ssc && i915_panel_use_ssc
4582
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4583
}
4584
 
4585
/**
4586
 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
4587
 * @crtc: CRTC structure
4588
 *
4589
 * A pipe may be connected to one or more outputs.  Based on the depth of the
4590
 * attached framebuffer, choose a good color depth to use on the pipe.
4591
 *
4592
 * If possible, match the pipe depth to the fb depth.  In some cases, this
4593
 * isn't ideal, because the connected output supports a lesser or restricted
4594
 * set of depths.  Resolve that here:
4595
 *    LVDS typically supports only 6bpc, so clamp down in that case
4596
 *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
4597
 *    Displays may support a restricted set as well, check EDID and clamp as
4598
 *      appropriate.
4599
 *
4600
 * RETURNS:
4601
 * Dithering requirement (i.e. false if display bpc and pipe bpc match,
4602
 * true if they don't match).
4603
 */
4604
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
					 unsigned int *pipe_bpp)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	/* Start unbounded; each sink can only lower (clamp) this value. */
	unsigned int display_bpc = UINT_MAX, bpc;

	/* Walk the encoders & connectors on this crtc, get min bpc */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		if (encoder->crtc != crtc)
			continue;

		if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
			unsigned int lvds_bpc;

			/* A3 power state distinguishes 8bpc from 6bpc panels. */
			if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
			    LVDS_A3_POWER_UP)
				lvds_bpc = 8;
			else
				lvds_bpc = 6;

			if (lvds_bpc < display_bpc) {
				DRM_DEBUG_DRIVER("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
				display_bpc = lvds_bpc;
			}
			continue;
		}

		if (intel_encoder->type == INTEL_OUTPUT_EDP) {
			/* Use VBT settings if we have an eDP panel */
			unsigned int edp_bpc = dev_priv->edp.bpp / 3;

			if (edp_bpc < display_bpc) {
				DRM_DEBUG_DRIVER("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
				display_bpc = edp_bpc;
			}
			continue;
		}

		/* Not one of the known troublemakers, check the EDID */
		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    head) {
			if (connector->encoder != encoder)
				continue;

			/* Don't use an invalid EDID bpc value */
			if (connector->display_info.bpc &&
			    connector->display_info.bpc < display_bpc) {
				DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
				display_bpc = connector->display_info.bpc;
			}
		}

		/*
		 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
		 * through, clamp it down.  (Note: >12bpc will be caught below.)
		 */
		if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
			if (display_bpc > 8 && display_bpc < 12) {
				DRM_DEBUG_DRIVER("forcing bpc to 12 for HDMI\n");
				display_bpc = 12;
			} else {
				DRM_DEBUG_DRIVER("forcing bpc to 8 for HDMI\n");
				display_bpc = 8;
			}
		}
	}

	/*
	 * We could just drive the pipe at the highest bpc all the time and
	 * enable dithering as needed, but that costs bandwidth.  So choose
	 * the minimum value that expresses the full color range of the fb but
	 * also stays within the max display bpc discovered above.
	 */

	/* Map framebuffer depth to a pipe bpc, clamped by the sinks above. */
	switch (crtc->fb->depth) {
	case 8:
		bpc = 8; /* since we go through a colormap */
		break;
	case 15:
	case 16:
		bpc = 6; /* min is 18bpp */
		break;
	case 24:
		bpc = min((unsigned int)8, display_bpc);
		break;
	case 30:
		bpc = min((unsigned int)10, display_bpc);
		break;
	case 48:
		bpc = min((unsigned int)12, display_bpc);
		break;
	default:
		DRM_DEBUG("unsupported depth, assuming 24 bits\n");
		bpc = min((unsigned int)8, display_bpc);
		break;
	}

	DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n",
			 bpc, display_bpc);

	*pipe_bpp = bpc * 3;

	/* Dithering is needed whenever the pipe bpc differs from the sink's. */
	return display_bpc != bpc;
}
4713
 
4714
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4715
                  struct drm_display_mode *mode,
4716
                  struct drm_display_mode *adjusted_mode,
4717
                  int x, int y,
4718
                  struct drm_framebuffer *old_fb)
4719
{
4720
    struct drm_device *dev = crtc->dev;
4721
    struct drm_i915_private *dev_priv = dev->dev_private;
4722
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4723
    int pipe = intel_crtc->pipe;
4724
    int plane = intel_crtc->plane;
4725
    int refclk, num_connectors = 0;
4726
    intel_clock_t clock, reduced_clock;
4727
    u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
4728
    bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
4729
    bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
4730
    struct drm_mode_config *mode_config = &dev->mode_config;
4731
    struct intel_encoder *encoder;
4732
    const intel_limit_t *limit;
4733
    int ret;
4734
    u32 temp;
4735
    u32 lvds_sync = 0;
4736
 
4737
    list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4738
        if (encoder->base.crtc != crtc)
4739
            continue;
4740
 
4741
        switch (encoder->type) {
4742
        case INTEL_OUTPUT_LVDS:
4743
            is_lvds = true;
4744
            break;
4745
        case INTEL_OUTPUT_SDVO:
4746
        case INTEL_OUTPUT_HDMI:
4747
            is_sdvo = true;
4748
            if (encoder->needs_tv_clock)
4749
                is_tv = true;
4750
            break;
4751
        case INTEL_OUTPUT_DVO:
4752
            is_dvo = true;
4753
            break;
4754
        case INTEL_OUTPUT_TVOUT:
4755
            is_tv = true;
4756
            break;
4757
        case INTEL_OUTPUT_ANALOG:
4758
            is_crt = true;
4759
            break;
4760
        case INTEL_OUTPUT_DISPLAYPORT:
4761
            is_dp = true;
4762
            break;
4763
        }
4764
 
4765
        num_connectors++;
4766
    }
4767
 
4768
    if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4769
        refclk = dev_priv->lvds_ssc_freq * 1000;
4770
        DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
4771
                  refclk / 1000);
4772
    } else if (!IS_GEN2(dev)) {
4773
        refclk = 96000;
4774
    } else {
4775
        refclk = 48000;
4776
    }
4777
 
4778
    /*
4779
     * Returns a set of divisors for the desired target clock with the given
4780
     * refclk, or FALSE.  The returned values represent the clock equation:
4781
     * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
4782
     */
4783
    limit = intel_limit(crtc, refclk);
4784
    ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
4785
    if (!ok) {
4786
        DRM_ERROR("Couldn't find PLL settings for mode!\n");
4787
        return -EINVAL;
4788
    }
4789
 
4790
    /* Ensure that the cursor is valid for the new mode before changing... */
4791
//    intel_crtc_update_cursor(crtc, true);
4792
 
4793
    if (is_lvds && dev_priv->lvds_downclock_avail) {
4794
        has_reduced_clock = limit->find_pll(limit, crtc,
4795
                            dev_priv->lvds_downclock,
4796
                            refclk,
4797
                            &reduced_clock);
4798
        if (has_reduced_clock && (clock.p != reduced_clock.p)) {
4799
            /*
4800
             * If the different P is found, it means that we can't
4801
             * switch the display clock by using the FP0/FP1.
4802
             * In such case we will disable the LVDS downclock
4803
             * feature.
4804
             */
4805
            DRM_DEBUG_KMS("Different P is found for "
4806
                      "LVDS clock/downclock\n");
4807
            has_reduced_clock = 0;
4808
        }
4809
    }
4810
    /* SDVO TV has fixed PLL values depend on its clock range,
4811
       this mirrors vbios setting. */
4812
    if (is_sdvo && is_tv) {
4813
        if (adjusted_mode->clock >= 100000
4814
            && adjusted_mode->clock < 140500) {
4815
            clock.p1 = 2;
4816
            clock.p2 = 10;
4817
            clock.n = 3;
4818
            clock.m1 = 16;
4819
            clock.m2 = 8;
4820
        } else if (adjusted_mode->clock >= 140500
4821
               && adjusted_mode->clock <= 200000) {
4822
            clock.p1 = 1;
4823
            clock.p2 = 10;
4824
            clock.n = 6;
4825
            clock.m1 = 12;
4826
            clock.m2 = 8;
4827
        }
4828
    }
4829
 
4830
    if (IS_PINEVIEW(dev)) {
4831
        fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
4832
        if (has_reduced_clock)
4833
            fp2 = (1 << reduced_clock.n) << 16 |
4834
                reduced_clock.m1 << 8 | reduced_clock.m2;
4835
    } else {
4836
        fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
4837
        if (has_reduced_clock)
4838
            fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
4839
                reduced_clock.m2;
4840
    }
4841
 
4842
    dpll = DPLL_VGA_MODE_DIS;
4843
 
4844
    if (!IS_GEN2(dev)) {
4845
        if (is_lvds)
4846
            dpll |= DPLLB_MODE_LVDS;
4847
        else
4848
            dpll |= DPLLB_MODE_DAC_SERIAL;
4849
        if (is_sdvo) {
4850
            int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4851
            if (pixel_multiplier > 1) {
4852
                if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4853
                    dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
4854
            }
4855
            dpll |= DPLL_DVO_HIGH_SPEED;
4856
        }
4857
        if (is_dp)
4858
            dpll |= DPLL_DVO_HIGH_SPEED;
4859
 
4860
        /* compute bitmask from p1 value */
4861
        if (IS_PINEVIEW(dev))
4862
            dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
4863
        else {
4864
            dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4865
            if (IS_G4X(dev) && has_reduced_clock)
4866
                dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
4867
        }
4868
        switch (clock.p2) {
4869
        case 5:
4870
            dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
4871
            break;
4872
        case 7:
4873
            dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
4874
            break;
4875
        case 10:
4876
            dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
4877
            break;
4878
        case 14:
4879
            dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
4880
            break;
4881
        }
4882
        if (INTEL_INFO(dev)->gen >= 4)
4883
            dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
4884
    } else {
4885
        if (is_lvds) {
4886
            dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4887
        } else {
4888
            if (clock.p1 == 2)
4889
                dpll |= PLL_P1_DIVIDE_BY_TWO;
4890
            else
4891
                dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4892
            if (clock.p2 == 4)
4893
                dpll |= PLL_P2_DIVIDE_BY_4;
4894
        }
4895
    }
4896
 
4897
    if (is_sdvo && is_tv)
4898
        dpll |= PLL_REF_INPUT_TVCLKINBC;
4899
    else if (is_tv)
4900
        /* XXX: just matching BIOS for now */
4901
        /*  dpll |= PLL_REF_INPUT_TVCLKINBC; */
4902
        dpll |= 3;
4903
    else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4904
        dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4905
    else
4906
        dpll |= PLL_REF_INPUT_DREFCLK;
4907
 
4908
    /* setup pipeconf */
4909
    pipeconf = I915_READ(PIPECONF(pipe));
4910
 
4911
    /* Set up the display plane register */
4912
    dspcntr = DISPPLANE_GAMMA_ENABLE;
4913
 
4914
    /* Ironlake's plane is forced to pipe, bit 24 is to
4915
       enable color space conversion */
4916
    if (pipe == 0)
4917
        dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
4918
    else
4919
        dspcntr |= DISPPLANE_SEL_PIPE_B;
4920
 
4921
    if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
4922
        /* Enable pixel doubling when the dot clock is > 90% of the (display)
4923
         * core speed.
4924
         *
4925
         * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
4926
         * pipe == 0 check?
4927
         */
4928
        if (mode->clock >
4929
            dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
4930
            pipeconf |= PIPECONF_DOUBLE_WIDE;
4931
        else
4932
            pipeconf &= ~PIPECONF_DOUBLE_WIDE;
4933
    }
4934
 
4935
    dpll |= DPLL_VCO_ENABLE;
4936
 
4937
    DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
4938
    drm_mode_debug_printmodeline(mode);
4939
 
4940
    I915_WRITE(FP0(pipe), fp);
4941
    I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
4942
 
4943
    POSTING_READ(DPLL(pipe));
4944
    udelay(150);
4945
 
4946
    /* The LVDS pin pair needs to be on before the DPLLs are enabled.
4947
     * This is an exception to the general rule that mode_set doesn't turn
4948
     * things on.
4949
     */
4950
    if (is_lvds) {
4951
        temp = I915_READ(LVDS);
4952
        temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
4953
        if (pipe == 1) {
4954
            temp |= LVDS_PIPEB_SELECT;
4955
        } else {
4956
            temp &= ~LVDS_PIPEB_SELECT;
4957
        }
4958
        /* set the corresponsding LVDS_BORDER bit */
4959
        temp |= dev_priv->lvds_border_bits;
4960
        /* Set the B0-B3 data pairs corresponding to whether we're going to
4961
         * set the DPLLs for dual-channel mode or not.
4962
         */
4963
        if (clock.p2 == 7)
4964
            temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
4965
        else
4966
            temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
4967
 
4968
        /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
4969
         * appropriately here, but we need to look more thoroughly into how
4970
         * panels behave in the two modes.
4971
         */
4972
        /* set the dithering flag on LVDS as needed */
4973
        if (INTEL_INFO(dev)->gen >= 4) {
4974
            if (dev_priv->lvds_dither)
4975
                temp |= LVDS_ENABLE_DITHER;
4976
            else
4977
                temp &= ~LVDS_ENABLE_DITHER;
4978
        }
4979
        if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
4980
            lvds_sync |= LVDS_HSYNC_POLARITY;
4981
        if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
4982
            lvds_sync |= LVDS_VSYNC_POLARITY;
4983
        if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
4984
            != lvds_sync) {
4985
            char flags[2] = "-+";
4986
            DRM_INFO("Changing LVDS panel from "
4987
                 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
4988
                 flags[!(temp & LVDS_HSYNC_POLARITY)],
4989
                 flags[!(temp & LVDS_VSYNC_POLARITY)],
4990
                 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
4991
                 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
4992
            temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
4993
            temp |= lvds_sync;
4994
        }
4995
        I915_WRITE(LVDS, temp);
4996
    }
4997
 
4998
    if (is_dp) {
4999
        intel_dp_set_m_n(crtc, mode, adjusted_mode);
5000
    }
5001
 
5002
    I915_WRITE(DPLL(pipe), dpll);
5003
 
5004
    /* Wait for the clocks to stabilize. */
5005
    POSTING_READ(DPLL(pipe));
5006
    udelay(150);
5007
 
5008
    if (INTEL_INFO(dev)->gen >= 4) {
5009
        temp = 0;
5010
        if (is_sdvo) {
5011
            temp = intel_mode_get_pixel_multiplier(adjusted_mode);
5012
            if (temp > 1)
5013
                temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5014
            else
5015
                temp = 0;
5016
        }
5017
        I915_WRITE(DPLL_MD(pipe), temp);
5018
    } else {
5019
        /* The pixel multiplier can only be updated once the
5020
         * DPLL is enabled and the clocks are stable.
5021
         *
5022
         * So write it again.
5023
         */
5024
        I915_WRITE(DPLL(pipe), dpll);
5025
    }
5026
 
5027
    intel_crtc->lowfreq_avail = false;
5028
    if (is_lvds && has_reduced_clock && i915_powersave) {
5029
        I915_WRITE(FP1(pipe), fp2);
5030
        intel_crtc->lowfreq_avail = true;
5031
        if (HAS_PIPE_CXSR(dev)) {
5032
            DRM_DEBUG_KMS("enabling CxSR downclocking\n");
5033
            pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
5034
        }
5035
    } else {
5036
        I915_WRITE(FP1(pipe), fp);
5037
        if (HAS_PIPE_CXSR(dev)) {
5038
            DRM_DEBUG_KMS("disabling CxSR downclocking\n");
5039
            pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
5040
        }
5041
    }
5042
 
5043
    if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5044
        pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
5045
        /* the chip adds 2 halflines automatically */
5046
        adjusted_mode->crtc_vdisplay -= 1;
5047
        adjusted_mode->crtc_vtotal -= 1;
5048
        adjusted_mode->crtc_vblank_start -= 1;
5049
        adjusted_mode->crtc_vblank_end -= 1;
5050
        adjusted_mode->crtc_vsync_end -= 1;
5051
        adjusted_mode->crtc_vsync_start -= 1;
5052
    } else
5053
        pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
5054
 
5055
    I915_WRITE(HTOTAL(pipe),
5056
           (adjusted_mode->crtc_hdisplay - 1) |
5057
           ((adjusted_mode->crtc_htotal - 1) << 16));
5058
    I915_WRITE(HBLANK(pipe),
5059
           (adjusted_mode->crtc_hblank_start - 1) |
5060
           ((adjusted_mode->crtc_hblank_end - 1) << 16));
5061
    I915_WRITE(HSYNC(pipe),
5062
           (adjusted_mode->crtc_hsync_start - 1) |
5063
           ((adjusted_mode->crtc_hsync_end - 1) << 16));
5064
 
5065
    I915_WRITE(VTOTAL(pipe),
5066
           (adjusted_mode->crtc_vdisplay - 1) |
5067
           ((adjusted_mode->crtc_vtotal - 1) << 16));
5068
    I915_WRITE(VBLANK(pipe),
5069
           (adjusted_mode->crtc_vblank_start - 1) |
5070
           ((adjusted_mode->crtc_vblank_end - 1) << 16));
5071
    I915_WRITE(VSYNC(pipe),
5072
           (adjusted_mode->crtc_vsync_start - 1) |
5073
           ((adjusted_mode->crtc_vsync_end - 1) << 16));
5074
 
5075
    /* pipesrc and dspsize control the size that is scaled from,
5076
     * which should always be the user's requested size.
5077
     */
5078
    I915_WRITE(DSPSIZE(plane),
5079
           ((mode->vdisplay - 1) << 16) |
5080
           (mode->hdisplay - 1));
5081
    I915_WRITE(DSPPOS(plane), 0);
5082
    I915_WRITE(PIPESRC(pipe),
5083
           ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
5084
 
5085
    I915_WRITE(PIPECONF(pipe), pipeconf);
5086
    POSTING_READ(PIPECONF(pipe));
5087
    intel_enable_pipe(dev_priv, pipe, false);
5088
 
5089
    intel_wait_for_vblank(dev, pipe);
5090
 
5091
    I915_WRITE(DSPCNTR(plane), dspcntr);
5092
    POSTING_READ(DSPCNTR(plane));
5093
    intel_enable_plane(dev_priv, plane, pipe);
5094
 
5095
    ret = intel_pipe_set_base(crtc, x, y, old_fb);
5096
 
5097
    intel_update_watermarks(dev);
5098
 
5099
    return ret;
5100
}
5101
 
5102
static void ironlake_update_pch_refclk(struct drm_device *dev)
5103
{
5104
	struct drm_i915_private *dev_priv = dev->dev_private;
5105
	struct drm_mode_config *mode_config = &dev->mode_config;
5106
	struct drm_crtc *crtc;
5107
	struct intel_encoder *encoder;
5108
	struct intel_encoder *has_edp_encoder = NULL;
5109
	u32 temp;
5110
	bool has_lvds = false;
5111
 
5112
	/* We need to take the global config into account */
5113
	list_for_each_entry(crtc, &mode_config->crtc_list, head) {
5114
		if (!crtc->enabled)
5115
			continue;
5116
 
5117
		list_for_each_entry(encoder, &mode_config->encoder_list,
5118
				    base.head) {
5119
			if (encoder->base.crtc != crtc)
5120
				continue;
5121
 
5122
			switch (encoder->type) {
5123
			case INTEL_OUTPUT_LVDS:
5124
				has_lvds = true;
5125
			case INTEL_OUTPUT_EDP:
5126
				has_edp_encoder = encoder;
5127
				break;
5128
			}
5129
		}
5130
	}
5131
 
5132
	/* Ironlake: try to setup display ref clock before DPLL
5133
	 * enabling. This is only under driver's control after
5134
	 * PCH B stepping, previous chipset stepping should be
5135
	 * ignoring this setting.
5136
	 */
5137
	temp = I915_READ(PCH_DREF_CONTROL);
5138
	/* Always enable nonspread source */
5139
	temp &= ~DREF_NONSPREAD_SOURCE_MASK;
5140
	temp |= DREF_NONSPREAD_SOURCE_ENABLE;
5141
	temp &= ~DREF_SSC_SOURCE_MASK;
5142
	temp |= DREF_SSC_SOURCE_ENABLE;
5143
	I915_WRITE(PCH_DREF_CONTROL, temp);
5144
 
5145
	POSTING_READ(PCH_DREF_CONTROL);
5146
	udelay(200);
5147
 
5148
	if (has_edp_encoder) {
5149
		if (intel_panel_use_ssc(dev_priv)) {
5150
			temp |= DREF_SSC1_ENABLE;
5151
			I915_WRITE(PCH_DREF_CONTROL, temp);
5152
 
5153
			POSTING_READ(PCH_DREF_CONTROL);
5154
			udelay(200);
5155
		}
5156
		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5157
 
5158
		/* Enable CPU source on CPU attached eDP */
5159
		if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5160
			if (intel_panel_use_ssc(dev_priv))
5161
				temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5162
			else
5163
				temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5164
		} else {
5165
			/* Enable SSC on PCH eDP if needed */
5166
			if (intel_panel_use_ssc(dev_priv)) {
5167
				DRM_ERROR("enabling SSC on PCH\n");
5168
				temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
5169
			}
5170
		}
5171
		I915_WRITE(PCH_DREF_CONTROL, temp);
5172
		POSTING_READ(PCH_DREF_CONTROL);
5173
		udelay(200);
5174
	}
5175
}
5176
 
5177
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5178
                  struct drm_display_mode *mode,
5179
                  struct drm_display_mode *adjusted_mode,
5180
                  int x, int y,
5181
                  struct drm_framebuffer *old_fb)
5182
{
5183
    struct drm_device *dev = crtc->dev;
5184
    struct drm_i915_private *dev_priv = dev->dev_private;
5185
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5186
    int pipe = intel_crtc->pipe;
5187
    int plane = intel_crtc->plane;
5188
    int refclk, num_connectors = 0;
5189
    intel_clock_t clock, reduced_clock;
5190
    u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
5191
    bool ok, has_reduced_clock = false, is_sdvo = false;
5192
    bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
5193
    struct intel_encoder *has_edp_encoder = NULL;
5194
    struct drm_mode_config *mode_config = &dev->mode_config;
5195
    struct intel_encoder *encoder;
5196
    const intel_limit_t *limit;
5197
    int ret;
5198
    struct fdi_m_n m_n = {0};
5199
    u32 temp;
5200
    u32 lvds_sync = 0;
5201
    int target_clock, pixel_multiplier, lane, link_bw, factor;
5202
    unsigned int pipe_bpp;
5203
    bool dither;
5204
 
2336 Serge 5205
    ENTER();
5206
 
2327 Serge 5207
    list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5208
        if (encoder->base.crtc != crtc)
5209
            continue;
5210
 
5211
        switch (encoder->type) {
5212
        case INTEL_OUTPUT_LVDS:
5213
            is_lvds = true;
5214
            break;
5215
        case INTEL_OUTPUT_SDVO:
5216
        case INTEL_OUTPUT_HDMI:
5217
            is_sdvo = true;
5218
            if (encoder->needs_tv_clock)
5219
                is_tv = true;
5220
            break;
5221
        case INTEL_OUTPUT_TVOUT:
5222
            is_tv = true;
5223
            break;
5224
        case INTEL_OUTPUT_ANALOG:
5225
            is_crt = true;
5226
            break;
5227
        case INTEL_OUTPUT_DISPLAYPORT:
5228
            is_dp = true;
5229
            break;
5230
        case INTEL_OUTPUT_EDP:
5231
            has_edp_encoder = encoder;
5232
            break;
5233
        }
5234
 
5235
        num_connectors++;
5236
    }
5237
 
5238
    if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5239
        refclk = dev_priv->lvds_ssc_freq * 1000;
5240
        DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
5241
                  refclk / 1000);
5242
    } else {
5243
        refclk = 96000;
5244
        if (!has_edp_encoder ||
5245
            intel_encoder_is_pch_edp(&has_edp_encoder->base))
5246
            refclk = 120000; /* 120Mhz refclk */
5247
    }
5248
 
5249
    /*
5250
     * Returns a set of divisors for the desired target clock with the given
5251
     * refclk, or FALSE.  The returned values represent the clock equation:
5252
     * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
5253
     */
5254
    limit = intel_limit(crtc, refclk);
5255
    ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
5256
    if (!ok) {
5257
        DRM_ERROR("Couldn't find PLL settings for mode!\n");
5258
        return -EINVAL;
5259
    }
5260
 
5261
    /* Ensure that the cursor is valid for the new mode before changing... */
5262
//    intel_crtc_update_cursor(crtc, true);
5263
 
5264
    if (is_lvds && dev_priv->lvds_downclock_avail) {
5265
        has_reduced_clock = limit->find_pll(limit, crtc,
5266
                            dev_priv->lvds_downclock,
5267
                            refclk,
5268
                            &reduced_clock);
5269
        if (has_reduced_clock && (clock.p != reduced_clock.p)) {
5270
            /*
5271
             * If the different P is found, it means that we can't
5272
             * switch the display clock by using the FP0/FP1.
5273
             * In such case we will disable the LVDS downclock
5274
             * feature.
5275
             */
5276
            DRM_DEBUG_KMS("Different P is found for "
5277
                      "LVDS clock/downclock\n");
5278
            has_reduced_clock = 0;
5279
        }
5280
    }
5281
    /* SDVO TV has fixed PLL values depend on its clock range,
5282
       this mirrors vbios setting. */
5283
    if (is_sdvo && is_tv) {
5284
        if (adjusted_mode->clock >= 100000
5285
            && adjusted_mode->clock < 140500) {
5286
            clock.p1 = 2;
5287
            clock.p2 = 10;
5288
            clock.n = 3;
5289
            clock.m1 = 16;
5290
            clock.m2 = 8;
5291
        } else if (adjusted_mode->clock >= 140500
5292
               && adjusted_mode->clock <= 200000) {
5293
            clock.p1 = 1;
5294
            clock.p2 = 10;
5295
            clock.n = 6;
5296
            clock.m1 = 12;
5297
            clock.m2 = 8;
5298
        }
5299
    }
5300
 
5301
    /* FDI link */
5302
    pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5303
    lane = 0;
5304
    /* CPU eDP doesn't require FDI link, so just set DP M/N
5305
       according to current link config */
5306
    if (has_edp_encoder &&
5307
        !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5308
        target_clock = mode->clock;
5309
        intel_edp_link_config(has_edp_encoder,
5310
                      &lane, &link_bw);
5311
    } else {
5312
        /* [e]DP over FDI requires target mode clock
5313
           instead of link clock */
5314
        if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
5315
            target_clock = mode->clock;
5316
        else
5317
            target_clock = adjusted_mode->clock;
5318
 
5319
        /* FDI is a binary signal running at ~2.7GHz, encoding
5320
         * each output octet as 10 bits. The actual frequency
5321
         * is stored as a divider into a 100MHz clock, and the
5322
         * mode pixel clock is stored in units of 1KHz.
5323
         * Hence the bw of each lane in terms of the mode signal
5324
         * is:
5325
         */
5326
        link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
5327
    }
5328
 
5329
    /* determine panel color depth */
5330
    temp = I915_READ(PIPECONF(pipe));
5331
    temp &= ~PIPE_BPC_MASK;
5332
    dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp);
5333
    switch (pipe_bpp) {
5334
    case 18:
5335
        temp |= PIPE_6BPC;
5336
        break;
5337
    case 24:
5338
        temp |= PIPE_8BPC;
5339
        break;
5340
    case 30:
5341
        temp |= PIPE_10BPC;
5342
        break;
5343
    case 36:
5344
        temp |= PIPE_12BPC;
5345
        break;
5346
    default:
5347
        WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
5348
            pipe_bpp);
5349
        temp |= PIPE_8BPC;
5350
        pipe_bpp = 24;
5351
        break;
5352
    }
5353
 
5354
    intel_crtc->bpp = pipe_bpp;
5355
    I915_WRITE(PIPECONF(pipe), temp);
5356
 
5357
    if (!lane) {
5358
        /*
5359
         * Account for spread spectrum to avoid
5360
         * oversubscribing the link. Max center spread
5361
         * is 2.5%; use 5% for safety's sake.
5362
         */
5363
        u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
5364
        lane = bps / (link_bw * 8) + 1;
5365
    }
5366
 
5367
    intel_crtc->fdi_lanes = lane;
5368
 
5369
    if (pixel_multiplier > 1)
5370
        link_bw *= pixel_multiplier;
5371
    ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
5372
                 &m_n);
5373
 
5374
    ironlake_update_pch_refclk(dev);
5375
 
5376
    fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
5377
    if (has_reduced_clock)
5378
        fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
5379
            reduced_clock.m2;
5380
 
5381
    /* Enable autotuning of the PLL clock (if permissible) */
5382
    factor = 21;
5383
    if (is_lvds) {
5384
        if ((intel_panel_use_ssc(dev_priv) &&
5385
             dev_priv->lvds_ssc_freq == 100) ||
5386
            (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
5387
            factor = 25;
5388
    } else if (is_sdvo && is_tv)
5389
        factor = 20;
5390
 
5391
    if (clock.m < factor * clock.n)
5392
        fp |= FP_CB_TUNE;
5393
 
5394
    dpll = 0;
5395
 
5396
    if (is_lvds)
5397
        dpll |= DPLLB_MODE_LVDS;
5398
    else
5399
        dpll |= DPLLB_MODE_DAC_SERIAL;
5400
    if (is_sdvo) {
5401
        int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5402
        if (pixel_multiplier > 1) {
5403
            dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
5404
        }
5405
        dpll |= DPLL_DVO_HIGH_SPEED;
5406
    }
5407
    if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
5408
        dpll |= DPLL_DVO_HIGH_SPEED;
5409
 
5410
    /* compute bitmask from p1 value */
5411
    dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5412
    /* also FPA1 */
5413
    dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5414
 
5415
    switch (clock.p2) {
5416
    case 5:
5417
        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5418
        break;
5419
    case 7:
5420
        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5421
        break;
5422
    case 10:
5423
        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5424
        break;
5425
    case 14:
5426
        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5427
        break;
5428
    }
5429
 
5430
    if (is_sdvo && is_tv)
5431
        dpll |= PLL_REF_INPUT_TVCLKINBC;
5432
    else if (is_tv)
5433
        /* XXX: just matching BIOS for now */
5434
        /*  dpll |= PLL_REF_INPUT_TVCLKINBC; */
5435
        dpll |= 3;
5436
    else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5437
        dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5438
    else
5439
        dpll |= PLL_REF_INPUT_DREFCLK;
5440
 
5441
    /* setup pipeconf */
5442
    pipeconf = I915_READ(PIPECONF(pipe));
5443
 
5444
    /* Set up the display plane register */
5445
    dspcntr = DISPPLANE_GAMMA_ENABLE;
5446
 
5447
    DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
5448
    drm_mode_debug_printmodeline(mode);
5449
 
5450
    /* PCH eDP needs FDI, but CPU eDP does not */
5451
    if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5452
        I915_WRITE(PCH_FP0(pipe), fp);
5453
        I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
5454
 
5455
        POSTING_READ(PCH_DPLL(pipe));
5456
        udelay(150);
5457
    }
5458
 
5459
    /* enable transcoder DPLL */
5460
    if (HAS_PCH_CPT(dev)) {
5461
        temp = I915_READ(PCH_DPLL_SEL);
5462
        switch (pipe) {
5463
        case 0:
5464
            temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL;
5465
            break;
5466
        case 1:
5467
            temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL;
5468
            break;
5469
        case 2:
5470
            /* FIXME: manage transcoder PLLs? */
5471
            temp |= TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL;
5472
            break;
5473
        default:
5474
            BUG();
5475
        }
5476
        I915_WRITE(PCH_DPLL_SEL, temp);
5477
 
5478
        POSTING_READ(PCH_DPLL_SEL);
5479
        udelay(150);
5480
    }
5481
 
5482
    /* The LVDS pin pair needs to be on before the DPLLs are enabled.
5483
     * This is an exception to the general rule that mode_set doesn't turn
5484
     * things on.
5485
     */
5486
    if (is_lvds) {
5487
        temp = I915_READ(PCH_LVDS);
5488
        temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
5489
        if (pipe == 1) {
5490
            if (HAS_PCH_CPT(dev))
5491
                temp |= PORT_TRANS_B_SEL_CPT;
5492
            else
5493
                temp |= LVDS_PIPEB_SELECT;
5494
        } else {
5495
            if (HAS_PCH_CPT(dev))
5496
                temp &= ~PORT_TRANS_SEL_MASK;
5497
            else
5498
                temp &= ~LVDS_PIPEB_SELECT;
5499
        }
5500
        /* set the corresponsding LVDS_BORDER bit */
5501
        temp |= dev_priv->lvds_border_bits;
5502
        /* Set the B0-B3 data pairs corresponding to whether we're going to
5503
         * set the DPLLs for dual-channel mode or not.
5504
         */
5505
        if (clock.p2 == 7)
5506
            temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
5507
        else
5508
            temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
5509
 
5510
        /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
5511
         * appropriately here, but we need to look more thoroughly into how
5512
         * panels behave in the two modes.
5513
         */
5514
        if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5515
            lvds_sync |= LVDS_HSYNC_POLARITY;
5516
        if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
5517
            lvds_sync |= LVDS_VSYNC_POLARITY;
5518
        if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
5519
            != lvds_sync) {
5520
            char flags[2] = "-+";
5521
            DRM_INFO("Changing LVDS panel from "
5522
                 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
5523
                 flags[!(temp & LVDS_HSYNC_POLARITY)],
5524
                 flags[!(temp & LVDS_VSYNC_POLARITY)],
5525
                 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
5526
                 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
5527
            temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5528
            temp |= lvds_sync;
5529
        }
5530
        I915_WRITE(PCH_LVDS, temp);
5531
    }
5532
 
5533
    pipeconf &= ~PIPECONF_DITHER_EN;
5534
    pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
5535
    if ((is_lvds && dev_priv->lvds_dither) || dither) {
5536
        pipeconf |= PIPECONF_DITHER_EN;
5537
        pipeconf |= PIPECONF_DITHER_TYPE_ST1;
5538
    }
5539
    if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5540
        intel_dp_set_m_n(crtc, mode, adjusted_mode);
5541
    } else {
5542
        /* For non-DP output, clear any trans DP clock recovery setting.*/
5543
        I915_WRITE(TRANSDATA_M1(pipe), 0);
5544
        I915_WRITE(TRANSDATA_N1(pipe), 0);
5545
        I915_WRITE(TRANSDPLINK_M1(pipe), 0);
5546
        I915_WRITE(TRANSDPLINK_N1(pipe), 0);
5547
    }
5548
 
5549
    if (!has_edp_encoder ||
5550
        intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5551
        I915_WRITE(PCH_DPLL(pipe), dpll);
5552
 
5553
        /* Wait for the clocks to stabilize. */
5554
        POSTING_READ(PCH_DPLL(pipe));
5555
        udelay(150);
5556
 
5557
        /* The pixel multiplier can only be updated once the
5558
         * DPLL is enabled and the clocks are stable.
5559
         *
5560
         * So write it again.
5561
         */
5562
        I915_WRITE(PCH_DPLL(pipe), dpll);
5563
    }
5564
 
5565
    intel_crtc->lowfreq_avail = false;
5566
    if (is_lvds && has_reduced_clock && i915_powersave) {
5567
        I915_WRITE(PCH_FP1(pipe), fp2);
5568
        intel_crtc->lowfreq_avail = true;
5569
        if (HAS_PIPE_CXSR(dev)) {
5570
            DRM_DEBUG_KMS("enabling CxSR downclocking\n");
5571
            pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
5572
        }
5573
    } else {
5574
        I915_WRITE(PCH_FP1(pipe), fp);
5575
        if (HAS_PIPE_CXSR(dev)) {
5576
            DRM_DEBUG_KMS("disabling CxSR downclocking\n");
5577
            pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
5578
        }
5579
    }
5580
 
5581
    if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5582
        pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
5583
        /* the chip adds 2 halflines automatically */
5584
        adjusted_mode->crtc_vdisplay -= 1;
5585
        adjusted_mode->crtc_vtotal -= 1;
5586
        adjusted_mode->crtc_vblank_start -= 1;
5587
        adjusted_mode->crtc_vblank_end -= 1;
5588
        adjusted_mode->crtc_vsync_end -= 1;
5589
        adjusted_mode->crtc_vsync_start -= 1;
5590
    } else
5591
        pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
5592
 
5593
    I915_WRITE(HTOTAL(pipe),
5594
           (adjusted_mode->crtc_hdisplay - 1) |
5595
           ((adjusted_mode->crtc_htotal - 1) << 16));
5596
    I915_WRITE(HBLANK(pipe),
5597
           (adjusted_mode->crtc_hblank_start - 1) |
5598
           ((adjusted_mode->crtc_hblank_end - 1) << 16));
5599
    I915_WRITE(HSYNC(pipe),
5600
           (adjusted_mode->crtc_hsync_start - 1) |
5601
           ((adjusted_mode->crtc_hsync_end - 1) << 16));
5602
 
5603
    I915_WRITE(VTOTAL(pipe),
5604
           (adjusted_mode->crtc_vdisplay - 1) |
5605
           ((adjusted_mode->crtc_vtotal - 1) << 16));
5606
    I915_WRITE(VBLANK(pipe),
5607
           (adjusted_mode->crtc_vblank_start - 1) |
5608
           ((adjusted_mode->crtc_vblank_end - 1) << 16));
5609
    I915_WRITE(VSYNC(pipe),
5610
           (adjusted_mode->crtc_vsync_start - 1) |
5611
           ((adjusted_mode->crtc_vsync_end - 1) << 16));
5612
 
5613
    /* pipesrc controls the size that is scaled from, which should
5614
     * always be the user's requested size.
5615
     */
5616
    I915_WRITE(PIPESRC(pipe),
5617
           ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
5618
 
5619
    I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
5620
    I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
5621
    I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
5622
    I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
5623
 
5624
    if (has_edp_encoder &&
5625
        !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5626
        ironlake_set_pll_edp(crtc, adjusted_mode->clock);
5627
    }
5628
 
5629
    I915_WRITE(PIPECONF(pipe), pipeconf);
5630
    POSTING_READ(PIPECONF(pipe));
5631
 
5632
    intel_wait_for_vblank(dev, pipe);
5633
 
5634
    if (IS_GEN5(dev)) {
5635
        /* enable address swizzle for tiling buffer */
5636
        temp = I915_READ(DISP_ARB_CTL);
5637
        I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
5638
    }
5639
 
5640
    I915_WRITE(DSPCNTR(plane), dspcntr);
5641
    POSTING_READ(DSPCNTR(plane));
5642
 
5643
    ret = intel_pipe_set_base(crtc, x, y, old_fb);
5644
 
2336 Serge 5645
    dbgprintf("Set base\n");
5646
 
2327 Serge 5647
    intel_update_watermarks(dev);
5648
 
2336 Serge 5649
    LEAVE();
5650
 
2327 Serge 5651
    return ret;
5652
}
5653
 
2330 Serge 5654
static int intel_crtc_mode_set(struct drm_crtc *crtc,
5655
			       struct drm_display_mode *mode,
5656
			       struct drm_display_mode *adjusted_mode,
5657
			       int x, int y,
5658
			       struct drm_framebuffer *old_fb)
5659
{
5660
	struct drm_device *dev = crtc->dev;
5661
	struct drm_i915_private *dev_priv = dev->dev_private;
5662
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5663
	int pipe = intel_crtc->pipe;
5664
	int ret;
2327 Serge 5665
 
2330 Serge 5666
//	drm_vblank_pre_modeset(dev, pipe);
2336 Serge 5667
    ENTER();
2327 Serge 5668
 
2330 Serge 5669
	ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
5670
					      x, y, old_fb);
2327 Serge 5671
 
2330 Serge 5672
//	drm_vblank_post_modeset(dev, pipe);
2327 Serge 5673
 
2330 Serge 5674
	intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
2336 Serge 5675
    LEAVE();
2327 Serge 5676
 
2330 Serge 5677
	return ret;
5678
}
2327 Serge 5679
 
5680
/** Loads the palette/gamma unit for the CRTC with the prepared values */
5681
void intel_crtc_load_lut(struct drm_crtc *crtc)
5682
{
5683
	struct drm_device *dev = crtc->dev;
5684
	struct drm_i915_private *dev_priv = dev->dev_private;
5685
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5686
	int palreg = PALETTE(intel_crtc->pipe);
5687
	int i;
5688
 
5689
	/* The clocks have to be on to load the palette. */
5690
	if (!crtc->enabled)
5691
		return;
5692
 
5693
	/* use legacy palette for Ironlake */
5694
	if (HAS_PCH_SPLIT(dev))
5695
		palreg = LGC_PALETTE(intel_crtc->pipe);
5696
 
5697
	for (i = 0; i < 256; i++) {
5698
		I915_WRITE(palreg + 4 * i,
5699
			   (intel_crtc->lut_r[i] << 16) |
5700
			   (intel_crtc->lut_g[i] << 8) |
5701
			   intel_crtc->lut_b[i]);
5702
	}
5703
}
5704
 
5705
 
5706
 
5707
 
5708
 
5709
 
5710
 
5711
 
5712
 
5713
 
5714
 
5715
 
5716
 
5717
 
5718
 
5719
 
5720
 
5721
 
5722
 
5723
 
5724
 
5725
 
5726
 
5727
 
5728
 
5729
 
5730
 
5731
 
5732
 
5733
 
5734
 
5735
 
5736
 
5737
 
5738
 
5739
 
5740
 
2332 Serge 5741
/** Sets the color ramps on behalf of RandR */
5742
void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
5743
				 u16 blue, int regno)
5744
{
5745
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2327 Serge 5746
 
2332 Serge 5747
	intel_crtc->lut_r[regno] = red >> 8;
5748
	intel_crtc->lut_g[regno] = green >> 8;
5749
	intel_crtc->lut_b[regno] = blue >> 8;
5750
}
2327 Serge 5751
 
2332 Serge 5752
/** Reads back the color ramp for RandR.
 *
 * Expands the CRTC's 8-bit LUT shadow entry @regno into the upper byte
 * of each 16-bit output component (lower byte left zero).
 */
void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
			     u16 *blue, int regno)
{
	struct intel_crtc *icrtc = to_intel_crtc(crtc);

	*red = icrtc->lut_r[regno] << 8;
	*green = icrtc->lut_g[regno] << 8;
	*blue = icrtc->lut_b[regno] << 8;
}
2327 Serge 5761
 
2330 Serge 5762
/*
 * drm_crtc_funcs.gamma_set hook: copy the high byte of each 16-bit ramp
 * entry in [start, start+size) (clamped to the 256-entry LUT) into the
 * CRTC's shadow LUT, then push the whole table to hardware.
 */
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				 u16 *blue, uint32_t start, uint32_t size)
{
	struct intel_crtc *icrtc = to_intel_crtc(crtc);
	int last = (start + size > 256) ? 256 : start + size;
	int n;

	for (n = start; n < last; n++) {
		icrtc->lut_r[n] = red[n] >> 8;
		icrtc->lut_g[n] = green[n] >> 8;
		icrtc->lut_b[n] = blue[n] >> 8;
	}

	intel_crtc_load_lut(crtc);
}
2327 Serge 5776
 
2330 Serge 5777
/**
 * Get a pipe with a simple mode set on it for doing load-based monitor
 * detection.
 *
 * It will be up to the load-detect code to adjust the pipe as appropriate for
 * its requirements.  The pipe will be connected to no other encoders.
 *
 * Currently this code will only succeed if there is a pipe with no encoders
 * configured for it.  In the future, it could choose to temporarily disable
 * some outputs to free up a pipe for its use.
 *
 * \return crtc, or NULL if no pipes are available.
 */

/* VESA 640x480x72Hz mode to set on the pipe.
 * DRM_MODE fields: name, type, clock (kHz), then horizontal
 * display/sync-start/sync-end/total, hskew, the same five vertical
 * values, vscan, and sync-polarity flags.
 */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
2327 Serge 5796
 
5797
 
5798
 
5799
 
5800
 
2330 Serge 5801
static u32
5802
intel_framebuffer_pitch_for_width(int width, int bpp)
5803
{
5804
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
5805
	return ALIGN(pitch, 64);
5806
}
2327 Serge 5807
 
2330 Serge 5808
static u32
5809
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
5810
{
5811
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
5812
	return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
5813
}
2327 Serge 5814
 
2330 Serge 5815
/*
 * Allocate a GEM-backed framebuffer sized for @mode.
 *
 * NOTE(port): the GEM allocation path is commented out in this
 * KolibriOS port, so this function currently ALWAYS fails with
 * ERR_PTR(-ENOMEM).  Callers (intel_get_load_detect_pipe) handle this
 * via IS_ERR() and fall back gracefully.  The locals below are kept for
 * when the disabled code is re-enabled.
 */
static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
				  struct drm_display_mode *mode,
				  int depth, int bpp)
{
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd mode_cmd;

//	obj = i915_gem_alloc_object(dev,
//				    intel_framebuffer_size_for_mode(mode, bpp));
//	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

//	mode_cmd.width = mode->hdisplay;
//	mode_cmd.height = mode->vdisplay;
//	mode_cmd.depth = depth;
//	mode_cmd.bpp = bpp;
//	mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp);

//	return intel_framebuffer_create(dev, &mode_cmd, obj);
}
2327 Serge 5836
 
2330 Serge 5837
/*
 * Check whether the fbdev framebuffer is large enough to scan out
 * @mode, and return it for reuse if so.
 *
 * NOTE(port): the fbdev checks are commented out in this KolibriOS
 * port, so this function currently ALWAYS returns NULL ("does not
 * fit"), forcing the caller down the temporary-framebuffer path.
 */
static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
		   struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

//	if (dev_priv->fbdev == NULL)
//		return NULL;

//	obj = dev_priv->fbdev->ifb.obj;
//	if (obj == NULL)
//		return NULL;

//	fb = &dev_priv->fbdev->ifb.base;
//	if (fb->pitch < intel_framebuffer_pitch_for_width(mode->hdisplay,
//							  fb->bits_per_pixel))
		return NULL;

//	if (obj->base.size < mode->vdisplay * fb->pitch)
//		return NULL;

//	return fb;
}
2327 Serge 5862
 
2330 Serge 5863
/*
 * Acquire a pipe for load-based monitor detection and light it up with
 * @mode (or the VESA 640x480 fallback).  State needed to undo the
 * change is saved in @old for intel_release_load_detect_pipe().
 *
 * Returns true if the connector is driven by a running pipe, false if
 * no pipe or framebuffer could be obtained.
 */
bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
				struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old)
{
	struct intel_crtc *intel_crtc;
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *old_fb;
	int i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		intel_crtc = to_intel_crtc(crtc);
		old->dpms_mode = intel_crtc->dpms_mode;
		/* Nothing temporary was set up, so release is a no-op. */
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
			struct drm_encoder_helper_funcs *encoder_funcs;
			struct drm_crtc_helper_funcs *crtc_funcs;

			crtc_funcs = crtc->helper_private;
			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);

			encoder_funcs = encoder->helper_private;
			encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
		}

		return true;
	}

	/* Find an unused one (if possible) — i tracks the CRTC's bit in
	 * encoder->possible_crtcs. */
	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (!possible_crtc->enabled) {
			crtc = possible_crtc;
			break;
		}
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		return false;
	}

	encoder->crtc = crtc;
	connector->encoder = encoder;

	intel_crtc = to_intel_crtc(crtc);
	old->dpms_mode = intel_crtc->dpms_mode;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	if (!mode)
		mode = &load_detect_mode;

	old_fb = crtc->fb;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	crtc->fb = mode_fits_in_fbdev(dev, mode);
	if (crtc->fb == NULL) {
		/* NOTE(port): intel_framebuffer_create_for_mode() is stubbed
		 * in this port and always returns ERR_PTR(-ENOMEM), so this
		 * branch currently falls through to the IS_ERR bail-out. */
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
		old->release_fb = crtc->fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(crtc->fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		crtc->fb = old_fb;
		return false;
	}

	if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		crtc->fb = old_fb;
		return false;
	}

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	return true;
}
2327 Serge 5978
 
2330 Serge 5979
/*
 * Undo intel_get_load_detect_pipe(): tear down a temporarily borrowed
 * pipe (and its temporary framebuffer), or restore the previous DPMS
 * state when an already-assigned CRTC was reused.
 */
void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
				    struct drm_connector *connector,
				    struct intel_load_detect_pipe *old)
{
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_device *dev = encoder->dev;
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	/* Pipe was borrowed just for detection: detach and shut it down. */
	if (old->load_detect_temp) {
		connector->encoder = NULL;
		drm_helper_disable_unused_functions(dev);

		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);

		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON) {
		encoder_funcs->dpms(encoder, old->dpms_mode);
		crtc_funcs->dpms(crtc, old->dpms_mode);
	}
}
2327 Serge 6009
 
2330 Serge 6010
/* Returns the clock of the currently programmed mode of the given pipe. */
static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 dpll = I915_READ(DPLL(pipe));
	u32 fp;
	intel_clock_t clock;

	/* Pick whichever of the two divisor register sets is in use. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = I915_READ(FP0(pipe));
	else
		fp = I915_READ(FP1(pipe));

	/* Decode the M/N divisors; Pineview packs them differently. */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* P1 is stored one-hot; ffs() recovers the divisor value. */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the output type the PLL was programmed for. */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return 0;
		}

		/* XXX: Handle the 100Mhz refclk */
		intel_clock(dev, 96000, &clock);
	} else {
		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
			clock.p2 = 14;

			if ((dpll & PLL_REF_INPUT_MASK) ==
			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
				/* XXX: might not be 66MHz */
				intel_clock(dev, 66000, &clock);
			} else
				intel_clock(dev, 48000, &clock);
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;

			intel_clock(dev, 48000, &clock);
		}
	}

	/* XXX: It would be nice to validate the clocks, but we can't reuse
	 * i830PllIsValid() because it relies on the xf86_config connector
	 * configuration being accurate, which it isn't necessarily.
	 */

	return clock.dot;
}
2327 Serge 6096
 
2330 Serge 6097
/** Returns the currently programmed mode of the given pipe.
 *
 * Reads the pipe timing registers back into a freshly allocated
 * drm_display_mode (each register packs start/display in the low 16
 * bits and end/total in the high 16, both stored minus one).
 * Returns NULL on allocation failure; the caller owns the returned
 * mode and must free it.
 */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct drm_display_mode *mode;
	int htot = I915_READ(HTOTAL(pipe));
	int hsync = I915_READ(HSYNC(pipe));
	int vtot = I915_READ(VTOTAL(pipe));
	int vsync = I915_READ(VSYNC(pipe));

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	mode->clock = intel_crtc_clock_get(dev, crtc);
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);
	drm_mode_set_crtcinfo(mode, 0);

	return mode;
}
6129
 
6130
#define GPU_IDLE_TIMEOUT 500 /* ms */
6131
 
6132
 
6133
 
6134
 
6135
#define CRTC_IDLE_TIMEOUT 1000 /* ms */
6136
 
6137
 
6138
 
6139
 
2327 Serge 6140
static void intel_increase_pllclock(struct drm_crtc *crtc)
6141
{
6142
	struct drm_device *dev = crtc->dev;
6143
	drm_i915_private_t *dev_priv = dev->dev_private;
6144
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6145
	int pipe = intel_crtc->pipe;
6146
	int dpll_reg = DPLL(pipe);
6147
	int dpll;
6148
 
2336 Serge 6149
    ENTER();
6150
 
2327 Serge 6151
	if (HAS_PCH_SPLIT(dev))
6152
		return;
6153
 
6154
	if (!dev_priv->lvds_downclock_avail)
6155
		return;
6156
 
6157
	dpll = I915_READ(dpll_reg);
6158
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
6159
		DRM_DEBUG_DRIVER("upclocking LVDS\n");
6160
 
6161
		/* Unlock panel regs */
6162
		I915_WRITE(PP_CONTROL,
6163
			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
6164
 
6165
		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
6166
		I915_WRITE(dpll_reg, dpll);
6167
		intel_wait_for_vblank(dev, pipe);
6168
 
6169
		dpll = I915_READ(dpll_reg);
6170
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
6171
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
6172
 
6173
		/* ...and lock them again */
6174
		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
6175
	}
6176
 
2336 Serge 6177
    LEAVE();
6178
 
2327 Serge 6179
	/* Schedule downclock */
6180
//	mod_timer(&intel_crtc->idle_timer, jiffies +
6181
//		  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
6182
}
6183
 
6184
 
6185
 
6186
 
6187
 
6188
 
6189
 
6190
 
6191
 
6192
 
6193
 
6194
 
6195
 
6196
 
6197
 
6198
 
6199
 
6200
 
6201
 
6202
 
6203
 
6204
 
2330 Serge 6205
/*
 * drm_crtc_funcs.destroy hook: detach any pending page-flip work under
 * the event lock, free it, then release the DRM core state and the
 * intel_crtc itself.
 */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_unpin_work *work;
	unsigned long flags;

	/* Claim the pending work under the lock so the IRQ path can't
	 * see it after we free it. */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (work) {
		/* NOTE(port): workqueue cancellation disabled in this port. */
//		cancel_work_sync(&work->work);
		kfree(work);
	}

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}
2327 Serge 6226
 
6227
 
6228
 
6229
 
6230
 
6231
 
6232
 
6233
 
6234
 
6235
 
6236
 
6237
 
6238
 
6239
 
6240
 
6241
 
6242
 
6243
 
6244
 
6245
 
6246
 
6247
 
6248
 
6249
 
6250
 
6251
 
6252
 
6253
 
6254
 
6255
 
6256
 
6257
 
6258
 
6259
 
6260
 
6261
 
6262
 
6263
 
6264
 
6265
 
6266
 
6267
 
6268
 
6269
 
6270
 
6271
 
6272
 
6273
 
6274
 
6275
 
6276
 
6277
 
6278
 
6279
 
6280
 
6281
 
6282
 
6283
 
6284
 
6285
 
6286
 
6287
 
6288
 
6289
 
6290
 
6291
 
2330 Serge 6292
/*
 * Clean up conflicting pipe/plane routing left over from the BIOS or
 * bootloader on non-PCH hardware: if @plane is enabled but scanning out
 * the CPU pipe other than @pipe, disable that plane and its pipe so
 * later teardown happens in a well-defined order.
 */
static void intel_sanitize_modesetting(struct drm_device *dev,
				       int pipe, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg, val;

	if (HAS_PCH_SPLIT(dev))
		return;

	/* Who knows what state these registers were left in by the BIOS or
	 * grub?
	 *
	 * If we leave the registers in a conflicting state (e.g. with the
	 * display plane reading from the other pipe than the one we intend
	 * to use) then when we attempt to teardown the active mode, we will
	 * not disable the pipes and planes in the correct order -- leaving
	 * a plane reading from a disabled pipe and possibly leading to
	 * undefined behaviour.
	 */

	reg = DSPCNTR(plane);
	val = I915_READ(reg);

	if ((val & DISPLAY_PLANE_ENABLE) == 0)
		return;
	/* !! collapses the pipe-select bits to 0/1 for comparison. */
	if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
		return;

	/* This display plane is active and attached to the other CPU pipe. */
	pipe = !pipe;

	/* Disable the plane and wait for it to stop reading from the pipe. */
	intel_disable_plane(dev_priv, plane, pipe);
	intel_disable_pipe(dev_priv, pipe);
}
2327 Serge 6327
 
2330 Serge 6328
static void intel_crtc_reset(struct drm_crtc *crtc)
6329
{
6330
	struct drm_device *dev = crtc->dev;
6331
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2327 Serge 6332
 
2330 Serge 6333
	/* Reset flags back to the 'unknown' status so that they
6334
	 * will be correctly set on the initial modeset.
6335
	 */
6336
	intel_crtc->dpms_mode = -1;
2327 Serge 6337
 
2330 Serge 6338
	/* We need to fix up any BIOS configuration that conflicts with
6339
	 * our expectations.
6340
	 */
6341
	intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
6342
}
2327 Serge 6343
 
2330 Serge 6344
/* CRTC helper vtable shared by all pipes.  Deliberately non-const:
 * .prepare and .commit are filled in per-platform (ironlake vs i9xx)
 * by intel_crtc_init(). */
static struct drm_crtc_helper_funcs intel_helper_funcs = {
	.dpms = intel_crtc_dpms,
	.mode_fixup = intel_crtc_mode_fixup,
	.mode_set = intel_crtc_mode_set,
	.mode_set_base = intel_pipe_set_base,
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.load_lut = intel_crtc_load_lut,
	.disable = intel_crtc_disable,
};
2327 Serge 6353
 
2330 Serge 6354
/* Core DRM CRTC vtable.  Cursor and page-flip hooks are disabled in
 * this KolibriOS port. */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.reset = intel_crtc_reset,
//	.cursor_set = intel_crtc_cursor_set,
//	.cursor_move = intel_crtc_cursor_move,
	.gamma_set = intel_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.destroy = intel_crtc_destroy,
//	.page_flip = intel_crtc_page_flip,
};
2327 Serge 6363
 
2330 Serge 6364
/*
 * Allocate and register the intel_crtc for @pipe: identity gamma LUT,
 * pipe<->plane mapping (swapped on mobile gen3 for FBC), default state,
 * and the platform-specific helper hooks.  Silently returns on
 * allocation failure.
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	int i;

	/* Extra tail space for the per-CRTC connector pointer array. */
	intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);

	/* Start with an identity (linear) gamma ramp. */
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
	for (i = 0; i < 256; i++) {
		intel_crtc->lut_r[i] = i;
		intel_crtc->lut_g[i] = i;
		intel_crtc->lut_b[i] = i;
	}

	/* Swap pipes & planes for FBC on pre-965 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (IS_MOBILE(dev) && IS_GEN3(dev)) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	intel_crtc_reset(&intel_crtc->base);
	intel_crtc->active = true; /* force the pipe off on setup_init_config */
	intel_crtc->bpp = 24; /* default for pre-Ironlake */

	/* Shared helper vtable gets its per-platform prepare/commit here. */
	if (HAS_PCH_SPLIT(dev)) {
		intel_helper_funcs.prepare = ironlake_crtc_prepare;
		intel_helper_funcs.commit = ironlake_crtc_commit;
	} else {
		intel_helper_funcs.prepare = i9xx_crtc_prepare;
		intel_helper_funcs.commit = i9xx_crtc_commit;
	}

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_crtc->busy = false;

	/* NOTE(port): idle-downclock timer disabled in this port. */
//	setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
//		    (unsigned long)intel_crtc);
}
2327 Serge 6415
 
6416
 
6417
 
6418
 
6419
 
6420
 
6421
 
2330 Serge 6422
static int intel_encoder_clones(struct drm_device *dev, int type_mask)
6423
{
6424
	struct intel_encoder *encoder;
6425
	int index_mask = 0;
6426
	int entry = 0;
2327 Serge 6427
 
2330 Serge 6428
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
6429
		if (type_mask & encoder->clone_mask)
6430
			index_mask |= (1 << entry);
6431
		entry++;
6432
	}
2327 Serge 6433
 
2330 Serge 6434
	return index_mask;
6435
}
2327 Serge 6436
 
2330 Serge 6437
static bool has_edp_a(struct drm_device *dev)
6438
{
6439
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 6440
 
2330 Serge 6441
	if (!IS_MOBILE(dev))
6442
		return false;
2327 Serge 6443
 
2330 Serge 6444
	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
6445
		return false;
2327 Serge 6446
 
2330 Serge 6447
	if (IS_GEN5(dev) &&
6448
	    (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
6449
		return false;
2327 Serge 6450
 
2330 Serge 6451
	return true;
6452
}
2327 Serge 6453
 
2330 Serge 6454
/*
 * Probe and register every display output on the device.
 *
 * Order matters: LVDS first (so the panel fitter can be disabled when
 * absent), then eDP, analog CRT, and the digital ports.  On PCH-split
 * hardware the PCH-side registers are probed; otherwise the legacy
 * SDVO/HDMI/DP detect bits are walked.  Finally each encoder's
 * possible_crtcs/possible_clones masks are published to the DRM core.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;
	bool has_lvds = false;

    ENTER();

	if (IS_MOBILE(dev) && !IS_I830(dev))
		has_lvds = intel_lvds_init(dev);
	if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
		/* disable the panel fitter on everything but LVDS */
		I915_WRITE(PFIT_CONTROL, 0);
	}

	if (HAS_PCH_SPLIT(dev)) {
		/* Port D may be eDP rather than regular DP (VBT decides). */
		dpd_is_edp = intel_dpd_is_edp(dev);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A);

		if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
			intel_dp_init(dev, PCH_DP_D);
	}

	intel_crt_init(dev);

	if (HAS_PCH_SPLIT(dev)) {
		int found;

		if (I915_READ(HDMIB) & PORT_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB);
			if (!found)
				intel_hdmi_init(dev, HDMIB);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B);
		}

		if (I915_READ(HDMIC) & PORT_DETECTED)
			intel_hdmi_init(dev, HDMIC);

		if (I915_READ(HDMID) & PORT_DETECTED)
			intel_hdmi_init(dev, HDMID);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C);

		if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
			intel_dp_init(dev, PCH_DP_D);

	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
		bool found = false;

		if (I915_READ(SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, SDVOB);
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, SDVOB);
			}

			if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
				DRM_DEBUG_KMS("probing DP_B\n");
				intel_dp_init(dev, DP_B);
			}
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, SDVOC);
		}

		if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {

			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, SDVOC);
			}
			if (SUPPORTS_INTEGRATED_DP(dev)) {
				DRM_DEBUG_KMS("probing DP_C\n");
				intel_dp_init(dev, DP_C);
			}
		}

		if (SUPPORTS_INTEGRATED_DP(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED)) {
			DRM_DEBUG_KMS("probing DP_D\n");
			intel_dp_init(dev, DP_D);
		}
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);

	/* NOTE(port): TV-out probing disabled in this port. */
//   if (SUPPORTS_TV(dev))
//       intel_tv_init(dev);

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(dev, encoder->clone_mask);
	}

	/* disable all the possible outputs/crtcs before entering KMS mode */
//	drm_helper_disable_unused_functions(dev);

    LEAVE();
}
6564
 
6565
 
6566
 
6567
 
2327 Serge 6568
/* Mode-config vtable.  User framebuffer creation and output polling
 * are not wired up in this port (both hooks NULL). */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = NULL /*intel_user_framebuffer_create*/,
	.output_poll_changed = NULL /*intel_fb_output_poll_changed*/,
};
6572
 
6573
 
6574
 
6575
 
6576
 
6577
 
6578
 
6579
 
6580
 
6581
 
6582
 
6583
 
6584
 
2335 Serge 6585
/* Framebuffer vtable used by intel_framebuffer_init().  Destroy and
 * handle-creation hooks are disabled in this port, so such
 * framebuffers are never torn down through the DRM core. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
//	.destroy = intel_user_framebuffer_destroy,
//	.create_handle = intel_user_framebuffer_create_handle,
};
2327 Serge 6589
 
2335 Serge 6590
int intel_framebuffer_init(struct drm_device *dev,
6591
			   struct intel_framebuffer *intel_fb,
6592
			   struct drm_mode_fb_cmd *mode_cmd,
6593
			   struct drm_i915_gem_object *obj)
6594
{
6595
	int ret;
2327 Serge 6596
 
2335 Serge 6597
	if (obj->tiling_mode == I915_TILING_Y)
6598
		return -EINVAL;
2327 Serge 6599
 
2335 Serge 6600
	if (mode_cmd->pitch & 63)
6601
		return -EINVAL;
2327 Serge 6602
 
2335 Serge 6603
	switch (mode_cmd->bpp) {
6604
	case 8:
6605
	case 16:
6606
		/* Only pre-ILK can handle 5:5:5 */
6607
		if (mode_cmd->depth == 15 && !HAS_PCH_SPLIT(dev))
6608
			return -EINVAL;
6609
		break;
2327 Serge 6610
 
2335 Serge 6611
	case 24:
6612
	case 32:
6613
		break;
6614
	default:
6615
		return -EINVAL;
6616
	}
2327 Serge 6617
 
2335 Serge 6618
	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
6619
	if (ret) {
6620
		DRM_ERROR("framebuffer init failed %d\n", ret);
6621
		return ret;
6622
	}
2327 Serge 6623
 
2335 Serge 6624
	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
6625
	intel_fb->obj = obj;
6626
	return 0;
6627
}
2327 Serge 6628
 
6629
 
6630
 
6631
 
6632
 
6633
 
6634
 
6635
 
6636
 
6637
 
6638
 
6639
 
2330 Serge 6640
/*
 * Request a new render P-state (frequency index @val) via the MEMSWCTL
 * mailbox.  Returns false without writing if the previous command is
 * still pending (MEMCTL_CMD_STS set), true once the new command has
 * been issued.
 */
bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	/* Compose the change-frequency command and post it. */
	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	/* Setting the status bit kicks off command execution. */
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}
2327 Serge 6661
 
2330 Serge 6662
/*
 * Enable Ironlake dynamic render P-state switching (DRPS).
 *
 * Reads the fmin/fmax/fstart limits fused into MEMMODECTL, programs the
 * evaluation intervals and thresholds, enables software-controlled mode,
 * then requests the starting frequency via ironlake_set_drps().  Also
 * snapshots the energy counters used later for power estimation.
 */
void ironlake_enable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL);
	u8 fmax, fmin, fstart, vstart;

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	/* Voltage for the start frequency, from the per-state PXVFREQ table. */
	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->fmax = fmax; /* IPS callback will increase this */
	dev_priv->fstart = fstart;

	dev_priv->max_delay = fstart;
	dev_priv->min_delay = fmin;
	dev_priv->cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	/* Hand frequency control to software. */
	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	msleep(1);

	ironlake_set_drps(dev, fstart);

	/* Baseline for later power accounting (raw counter registers). */
	dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
		I915_READ(0x112e0);
//   dev_priv->last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->last_count2 = I915_READ(0x112f4);
//   getrawmonotonic(&dev_priv->last_time2);
	/* NOTE(review): the timestamp captures are stubbed out in this port,
	 * so any later delta-time based power math has no time baseline —
	 * verify the consumers before relying on them. */
}
2327 Serge 6725
 
6726
 
6727
 
6728
 
6729
 
6730
 
6731
 
6732
 
6733
 
6734
 
6735
 
6736
 
2330 Serge 6737
/*
 * Decode a PXVFREQ register value into a frequency in kHz.
 *
 * The register packs a divider (bits 21:16), a post-divider exponent
 * (bits 13:12) and a pre-divider (bits 2:0) applied to a 133.333 MHz
 * reference clock (expressed as 133333 kHz):
 *
 *     freq = (div * 133333) / ((1 << post) * pre)
 *
 * Returns 0 when the pre-divider field is zero, which both marks an
 * unprogrammed table entry and avoids a divide-by-zero.
 */
static unsigned long intel_pxfreq(u32 vidfreq)
{
	unsigned long freq;
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	/* Restored from upstream i915: this expression was truncated in the
	 * scraped copy ("((1<"). */
	freq = ((div * 133333) / ((1 << post) * pre));

	return freq;
}
2327 Serge 6751
 
2330 Serge 6752
/*
 * Program the Ironlake energy monitor (EMON) used by IPS power sharing.
 *
 * Writes event energy weights, derives per-P-state weights from the
 * PXVFREQ table (scaled by voltage^2 * frequency), programs assorted
 * magic calibration registers, then enables the power monitor and
 * caches the fused correction factor in dev_priv->corr.
 */
void intel_init_emon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW + (i * 4), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW + (i * 4), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		/* Weight ~ V^2 * f, normalized so max vid/freq maps to 255. */
		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	/* Pack the 16 byte-weights into four 32-bit PXW registers. */
	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW + (i * 4), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL + (i * 4), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
6822
 
6823
/*
 * Enable GEN6 (Sandy Bridge) render P-state / RC6 power management (RPS).
 *
 * Programs the RC state machine thresholds, optionally enables RC6
 * (module parameter i915_enable_rc6), configures the turbo controller,
 * negotiates the min-frequency table with the PCU via the pcode
 * mailbox, reads the min/max/current frequencies and overclock limits,
 * and unmasks the PM interrupts.  Runs under struct_mutex with
 * forcewake held for the duration of the register sequence.
 */
void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
	u32 pcu_mbox, rc6_mask = 0;
	int cur_freq, min_freq, max_freq;
	int i;

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	I915_WRITE(GEN6_RC_STATE, 0);
	mutex_lock(&dev_priv->dev->struct_mutex);
	gen6_gt_force_wake_get(dev_priv);

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for (i = 0; i < I915_NUM_RINGS; i++)
		I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	if (i915_enable_rc6)
		rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
			GEN6_RC_CTL_RC6_ENABLE;

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	/* Request initial render frequency and video frequency. */
	I915_WRITE(GEN6_RPNSWREQ,
		   GEN6_FREQUENCY(10) |
		   GEN6_OFFSET(0) |
		   GEN6_AGGRESSIVE_TURBO);
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   GEN6_FREQUENCY(12));

	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   18 << 24 |
		   6 << 16);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
	I915_WRITE(GEN6_RP_UP_EI, 100000);
	I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_USE_NORMAL_FREQ |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	/* Push the min-frequency table to the PCU through the mailbox. */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");

	I915_WRITE(GEN6_PCODE_DATA, 0);
	I915_WRITE(GEN6_PCODE_MAILBOX,
		   GEN6_PCODE_READY |
		   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");

	min_freq = (rp_state_cap & 0xff0000) >> 16;
	max_freq = rp_state_cap & 0xff;
	cur_freq = (gt_perf_status & 0xff00) >> 8;

	/* Check for overclock support */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
	pcu_mbox = I915_READ(GEN6_PCODE_DATA);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
	if (pcu_mbox & (1<<31)) { /* OC supported */
		max_freq = pcu_mbox & 0xff;
		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
	}

	/* In units of 100MHz */
	dev_priv->max_delay = max_freq;
	dev_priv->min_delay = min_freq;
	dev_priv->cur_delay = cur_freq;

	/* requires MSI enabled */
	I915_WRITE(GEN6_PMIER,
		   GEN6_PM_MBOX_EVENT |
		   GEN6_PM_THERMAL_EVENT |
		   GEN6_PM_RP_DOWN_TIMEOUT |
		   GEN6_PM_RP_UP_THRESHOLD |
		   GEN6_PM_RP_DOWN_THRESHOLD |
		   GEN6_PM_RP_UP_EI_EXPIRED |
		   GEN6_PM_RP_DOWN_EI_EXPIRED);
//   spin_lock_irq(&dev_priv->rps_lock);
//   WARN_ON(dev_priv->pm_iir != 0);
	I915_WRITE(GEN6_PMIMR, 0);
//   spin_unlock_irq(&dev_priv->rps_lock);
	/* NOTE(review): the rps_lock around the PMIMR clear is stubbed out in
	 * this port — unsafe if an IRQ handler touches pm_iir concurrently;
	 * confirm against the port's interrupt code. */
	/* enable all PM interrupts */
	I915_WRITE(GEN6_PMINTRMSK, 0);

	gen6_gt_force_wake_put(dev_priv);
	mutex_unlock(&dev_priv->dev->struct_mutex);
}
6946
 
6947
/*
 * Program the GEN6 ring-frequency table in the PCU.
 *
 * For every GPU frequency between max_delay and min_delay, writes the
 * IA (CPU) reference frequency the PCU should use when deriving the
 * ring frequency for that GPU state.  In this port the real CPU max
 * frequency lookup (cpufreq/tsc_khz) is stubbed and a fixed 3 GHz is
 * assumed.  Runs under struct_mutex.
 */
void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
	int min_freq = 15;
	int gpu_freq, ia_freq, max_ia_freq;
	int scaling_factor = 180;

//   max_ia_freq = cpufreq_quick_get_max(0);
	/*
	 * Default to measured freq if none found, PCU will ensure we don't go
	 * over
	 */
//   if (!max_ia_freq)
		max_ia_freq = 3000000; //tsc_khz;
	/* NOTE(review): the guard above is commented out, so the 3 GHz
	 * fallback is applied unconditionally — intentional for this port? */

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	mutex_lock(&dev_priv->dev->struct_mutex);

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access.  We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
	     gpu_freq--) {
		int diff = dev_priv->max_delay - gpu_freq;

		/*
		 * For GPU frequencies less than 750MHz, just use the lowest
		 * ring freq.
		 */
		if (gpu_freq < min_freq)
			ia_freq = 800;
		else
			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
		ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);

		I915_WRITE(GEN6_PCODE_DATA,
			   (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
			   gpu_freq);
		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
			   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
			      GEN6_PCODE_READY) == 0, 10)) {
			DRM_ERROR("pcode write of freq table timed out\n");
			continue;
		}
	}

	mutex_unlock(&dev_priv->dev->struct_mutex);
}
6999
 
2327 Serge 7000
/*
 * Ironlake (GEN5) clock-gating setup.
 *
 * Disables the specific clock-gating units required for FBC and CxSR
 * to function, enables memory self-refresh per the spec's register
 * recipe, clears the LP watermarks, and applies the Ironlake-M FBC
 * chicken-bit workarounds.
 */
static void ironlake_init_clock_gating(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

    /* Required for FBC */
    dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
        DPFCRUNIT_CLOCK_GATE_DISABLE |
        DPFDUNIT_CLOCK_GATE_DISABLE;
    /* Required for CxSR */
    dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;

    I915_WRITE(PCH_3DCGDIS0,
           MARIUNIT_CLOCK_GATE_DISABLE |
           SVSMUNIT_CLOCK_GATE_DISABLE);
    I915_WRITE(PCH_3DCGDIS1,
           VFMUNIT_CLOCK_GATE_DISABLE);

    I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

    /*
     * According to the spec the following bits should be set in
     * order to enable memory self-refresh
     * The bit 22/21 of 0x42004
     * The bit 5 of 0x42020
     * The bit 15 of 0x45000
     */
    I915_WRITE(ILK_DISPLAY_CHICKEN2,
           (I915_READ(ILK_DISPLAY_CHICKEN2) |
            ILK_DPARB_GATE | ILK_VSDPFD_FULL));
    I915_WRITE(ILK_DSPCLK_GATE,
           (I915_READ(ILK_DSPCLK_GATE) |
            ILK_DPARB_CLK_GATE));
    I915_WRITE(DISP_ARB_CTL,
           (I915_READ(DISP_ARB_CTL) |
            DISP_FBC_WM_DIS));
    /* Clear the low-power watermarks; they are programmed later. */
    I915_WRITE(WM3_LP_ILK, 0);
    I915_WRITE(WM2_LP_ILK, 0);
    I915_WRITE(WM1_LP_ILK, 0);

    /*
     * Based on the document from hardware guys the following bits
     * should be set unconditionally in order to enable FBC.
     * The bit 22 of 0x42000
     * The bit 22 of 0x42004
     * The bit 7,8,9 of 0x42020.
     */
    if (IS_IRONLAKE_M(dev)) {
        I915_WRITE(ILK_DISPLAY_CHICKEN1,
               I915_READ(ILK_DISPLAY_CHICKEN1) |
               ILK_FBCQ_DIS);
        I915_WRITE(ILK_DISPLAY_CHICKEN2,
               I915_READ(ILK_DISPLAY_CHICKEN2) |
               ILK_DPARB_GATE);
        I915_WRITE(ILK_DSPCLK_GATE,
               I915_READ(ILK_DSPCLK_GATE) |
               ILK_DPFC_DIS1 |
               ILK_DPFC_DIS2 |
               ILK_CLK_FBC);
    }

    I915_WRITE(ILK_DISPLAY_CHICKEN2,
           I915_READ(ILK_DISPLAY_CHICKEN2) |
           ILK_ELPIN_409_SELECT);
    I915_WRITE(_3D_CHICKEN2,
           _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
           _3D_CHICKEN2_WM_READ_PIPELINED);
}
7068
 
7069
/*
 * Sandy Bridge (GEN6) clock-gating setup.
 *
 * Applies the spec-mandated chicken bits for memory self-refresh and
 * FBC, clears the LP watermarks, and disables trickle feed on every
 * display plane (flushing each plane to latch the change).
 */
static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE,
		   I915_READ(ILK_DSPCLK_GATE) |
		   ILK_DPARB_CLK_GATE  |
		   ILK_DPFD_CLK_GATE);

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}
7112
 
7113
/*
 * Ivy Bridge (GEN7) clock-gating setup.
 *
 * Clears the LP watermarks, disables VRHUNIT clock gating, and turns
 * off trickle feed on every display plane.
 */
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}
7134
 
7135
/*
 * G4x clock-gating setup: disable gating for the render units and the
 * display overlay units; GM45 additionally needs DSS gating disabled.
 */
static void g4x_init_clock_gating(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    uint32_t dspclk_gate;

    I915_WRITE(RENCLK_GATE_D1, 0);
    I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
           GS_UNIT_CLOCK_GATE_DISABLE |
           CL_UNIT_CLOCK_GATE_DISABLE);
    I915_WRITE(RAMCLK_GATE_D, 0);
    dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
        OVRUNIT_CLOCK_GATE_DISABLE |
        OVCUNIT_CLOCK_GATE_DISABLE;
    if (IS_GM45(dev))
        dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
    I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
}
7152
 
7153
/* Crestline (965GM) clock-gating setup: disable RCC gating, clear the
 * remaining gating controls and the DEU control register. */
static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
}
7163
 
7164
/* Broadwater (965G) clock-gating setup: disable gating for the listed
 * render-side units and clear the second gating control register. */
static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
}
7175
 
7176
/* GEN3 clock-gating setup: enable PLL D3-off, graphics and dot clock
 * gating via the D_STATE register. */
static void gen3_init_clock_gating(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    u32 dstate = I915_READ(D_STATE);

    dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
        DSTATE_DOT_CLOCK_GATING;
    I915_WRITE(D_STATE, dstate);
}
7185
 
7186
/* i85x clock-gating setup: only the SV unit needs gating disabled. */
static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
}
7192
 
7193
/* i830 clock-gating setup: disable overlay unit clock gating. */
static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
7199
 
7200
/* Ibex Peak PCH clock-gating setup. */
static void ibx_init_clock_gating(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    /*
     * On Ibex Peak and Cougar Point, we need to disable clock
     * gating for the panel power sequencer or it will fail to
     * start up when no ports are active.
     */
    I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
7211
 
7212
/* Cougar Point PCH clock-gating setup. */
static void cpt_init_clock_gating(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    int pipe;

    /*
     * On Ibex Peak and Cougar Point, we need to disable clock
     * gating for the panel power sequencer or it will fail to
     * start up when no ports are active.
     */
    I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
    I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
           DPLS_EDP_PPS_FIX_DIS);
    /* Without this, mode sets may fail silently on FDI */
    for_each_pipe(pipe)
        I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
}
7229
 
2332 Serge 7230
/*
 * Drop the RC6 render/power context pointers.
 *
 * NOTE(review): the GEM unpin/unreference calls are stubbed out in this
 * port, so if the context pages were ever actually allocated they are
 * leaked here — confirm against the port's intel_alloc_context_page().
 */
static void ironlake_teardown_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->renderctx) {
//		i915_gem_object_unpin(dev_priv->renderctx);
//		drm_gem_object_unreference(&dev_priv->renderctx->base);
		dev_priv->renderctx = NULL;
	}

	if (dev_priv->pwrctx) {
//		i915_gem_object_unpin(dev_priv->pwrctx);
//		drm_gem_object_unreference(&dev_priv->pwrctx->base);
		dev_priv->pwrctx = NULL;
	}
}
2327 Serge 7246
 
2339 Serge 7247
/*
 * Disable Ironlake RC6 if it is active and tear down its contexts.
 *
 * If a power context is programmed (PWRCTXA non-zero): force the GPU
 * awake via RCX_SW_EXIT, wait for the render standby unit to report ON,
 * clear the power context address, then restore RSTDBYCTL.  The teardown
 * of the context pointers happens unconditionally.
 */
static void ironlake_disable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_READ(PWRCTXA)) {
		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
			 50);

		I915_WRITE(PWRCTXA, 0);
		POSTING_READ(PWRCTXA);

		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
		POSTING_READ(RSTDBYCTL);
	}

	ironlake_teardown_rc6(dev);
}
2332 Serge 7266
 
7267
static int ironlake_setup_rc6(struct drm_device *dev)
7268
{
7269
	struct drm_i915_private *dev_priv = dev->dev_private;
7270
 
7271
	if (dev_priv->renderctx == NULL)
7272
//		dev_priv->renderctx = intel_alloc_context_page(dev);
7273
	if (!dev_priv->renderctx)
7274
		return -ENOMEM;
7275
 
7276
	if (dev_priv->pwrctx == NULL)
7277
//		dev_priv->pwrctx = intel_alloc_context_page(dev);
7278
	if (!dev_priv->pwrctx) {
7279
		ironlake_teardown_rc6(dev);
7280
		return -ENOMEM;
7281
	}
7282
 
7283
	return 0;
7284
}
7285
 
7286
/*
 * Enable Ironlake RC6 render power saving.
 *
 * Disabled by default (i915_enable_rc6 module parameter) due to hangs.
 * Sets up the render/power context pages, then (in the disabled #if 0
 * section) would emit the MI_SET_CONTEXT sequence that points the GPU
 * at the save/restore context.  Finally programs PWRCTXA and clears
 * RCX_SW_EXIT so hardware may enter RC6.
 */
void ironlake_enable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* rc6 disabled by default due to repeated reports of hanging during
	 * boot and resume.
	 */
	if (!i915_enable_rc6)
		return;

	mutex_lock(&dev->struct_mutex);
	ret = ironlake_setup_rc6(dev);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return;
	}

	/*
	 * GPU can automatically power down the render unit if given a page
	 * to save state.
	 */
#if 0
	ret = BEGIN_LP_RING(6);
	if (ret) {
		ironlake_teardown_rc6(dev);
		mutex_unlock(&dev->struct_mutex);
		return;
	}

	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
	OUT_RING(MI_SET_CONTEXT);
	OUT_RING(dev_priv->renderctx->gtt_offset |
		 MI_MM_SPACE_GTT |
		 MI_SAVE_EXT_STATE_EN |
		 MI_RESTORE_EXT_STATE_EN |
		 MI_RESTORE_INHIBIT);
	OUT_RING(MI_SUSPEND_FLUSH);
	OUT_RING(MI_NOOP);
	OUT_RING(MI_FLUSH);
	ADVANCE_LP_RING();

	/*
	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
	 * does an implicit flush, combined with MI_FLUSH above, it should be
	 * safe to assume that renderctx is valid
	 */
	ret = intel_wait_ring_idle(LP_RING(dev_priv));
	if (ret) {
		DRM_ERROR("failed to enable ironlake power power savings\n");
		ironlake_teardown_rc6(dev);
		mutex_unlock(&dev->struct_mutex);
		return;
	}
#endif

	/* Point hardware at the power context page and allow RC6 entry. */
	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
	mutex_unlock(&dev->struct_mutex);
}
7346
 
2330 Serge 7347
void intel_init_clock_gating(struct drm_device *dev)
7348
{
7349
	struct drm_i915_private *dev_priv = dev->dev_private;
7350
 
7351
	dev_priv->display.init_clock_gating(dev);
7352
 
7353
	if (dev_priv->display.init_pch_clock_gating)
7354
		dev_priv->display.init_pch_clock_gating(dev);
7355
}
7356
 
2327 Serge 7357
/* Set up chip specific display functions */
/*
 * Populate dev_priv->display with the function pointers appropriate for
 * this chip generation: DPMS/modeset/plane hooks, FBC hooks, display
 * clock query, watermark update, FDI link training, and clock-gating
 * init.  Later code calls only through this vtable.
 */
static void intel_init_display(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    /* We always want a DPMS function */
    if (HAS_PCH_SPLIT(dev)) {
        dev_priv->display.dpms = ironlake_crtc_dpms;
        dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
        dev_priv->display.update_plane = ironlake_update_plane;
    } else {
        dev_priv->display.dpms = i9xx_crtc_dpms;
        dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
        dev_priv->display.update_plane = i9xx_update_plane;
    }

    if (I915_HAS_FBC(dev)) {
        if (HAS_PCH_SPLIT(dev)) {
            dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
            dev_priv->display.enable_fbc = ironlake_enable_fbc;
            dev_priv->display.disable_fbc = ironlake_disable_fbc;
        } else if (IS_GM45(dev)) {
            dev_priv->display.fbc_enabled = g4x_fbc_enabled;
            dev_priv->display.enable_fbc = g4x_enable_fbc;
            dev_priv->display.disable_fbc = g4x_disable_fbc;
        } else if (IS_CRESTLINE(dev)) {
            dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
            dev_priv->display.enable_fbc = i8xx_enable_fbc;
            dev_priv->display.disable_fbc = i8xx_disable_fbc;
        }
        /* 855GM needs testing */
    }

    /* Returns the core display clock speed */
    if (IS_I945G(dev) || (IS_G33(dev) && ! IS_PINEVIEW_M(dev)))
        dev_priv->display.get_display_clock_speed =
            i945_get_display_clock_speed;
    else if (IS_I915G(dev))
        dev_priv->display.get_display_clock_speed =
            i915_get_display_clock_speed;
    else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
        dev_priv->display.get_display_clock_speed =
            i9xx_misc_get_display_clock_speed;
    else if (IS_I915GM(dev))
        dev_priv->display.get_display_clock_speed =
            i915gm_get_display_clock_speed;
    else if (IS_I865G(dev))
        dev_priv->display.get_display_clock_speed =
            i865_get_display_clock_speed;
    else if (IS_I85X(dev))
        dev_priv->display.get_display_clock_speed =
            i855_get_display_clock_speed;
    else /* 852, 830 */
        dev_priv->display.get_display_clock_speed =
            i830_get_display_clock_speed;

    /* For FIFO watermark updates */
    if (HAS_PCH_SPLIT(dev)) {
        if (HAS_PCH_IBX(dev))
            dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
        else if (HAS_PCH_CPT(dev))
            dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;

        if (IS_GEN5(dev)) {
            /* Watermarks only usable if the memory latency fuse is valid. */
            if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
                dev_priv->display.update_wm = ironlake_update_wm;
            else {
                DRM_DEBUG_KMS("Failed to get proper latency. "
                          "Disable CxSR\n");
                dev_priv->display.update_wm = NULL;
            }
            dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
            dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
        } else if (IS_GEN6(dev)) {
            if (SNB_READ_WM0_LATENCY()) {
                dev_priv->display.update_wm = sandybridge_update_wm;
            } else {
                DRM_DEBUG_KMS("Failed to read display plane latency. "
                          "Disable CxSR\n");
                dev_priv->display.update_wm = NULL;
            }
            dev_priv->display.fdi_link_train = gen6_fdi_link_train;
            dev_priv->display.init_clock_gating = gen6_init_clock_gating;
        } else if (IS_IVYBRIDGE(dev)) {
            /* FIXME: detect B0+ stepping and use auto training */
            dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
            if (SNB_READ_WM0_LATENCY()) {
                dev_priv->display.update_wm = sandybridge_update_wm;
            } else {
                DRM_DEBUG_KMS("Failed to read display plane latency. "
                          "Disable CxSR\n");
                dev_priv->display.update_wm = NULL;
            }
            dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;

        } else
            dev_priv->display.update_wm = NULL;
    } else if (IS_PINEVIEW(dev)) {
        if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
                        dev_priv->is_ddr3,
                        dev_priv->fsb_freq,
                        dev_priv->mem_freq)) {
            DRM_INFO("failed to find known CxSR latency "
                 "(found ddr%s fsb freq %d, mem freq %d), "
                 "disabling CxSR\n",
                 (dev_priv->is_ddr3 == 1) ? "3": "2",
                 dev_priv->fsb_freq, dev_priv->mem_freq);
            /* Disable CxSR and never update its watermark again */
            pineview_disable_cxsr(dev);
            dev_priv->display.update_wm = NULL;
        } else
            dev_priv->display.update_wm = pineview_update_wm;
        dev_priv->display.init_clock_gating = gen3_init_clock_gating;
    } else if (IS_G4X(dev)) {
        dev_priv->display.update_wm = g4x_update_wm;
        dev_priv->display.init_clock_gating = g4x_init_clock_gating;
    } else if (IS_GEN4(dev)) {
        dev_priv->display.update_wm = i965_update_wm;
        if (IS_CRESTLINE(dev))
            dev_priv->display.init_clock_gating = crestline_init_clock_gating;
        else if (IS_BROADWATER(dev))
            dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
    } else if (IS_GEN3(dev)) {
        dev_priv->display.update_wm = i9xx_update_wm;
        dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
        dev_priv->display.init_clock_gating = gen3_init_clock_gating;
    } else if (IS_I865G(dev)) {
        dev_priv->display.update_wm = i830_update_wm;
        dev_priv->display.init_clock_gating = i85x_init_clock_gating;
        dev_priv->display.get_fifo_size = i830_get_fifo_size;
    } else if (IS_I85X(dev)) {
        dev_priv->display.update_wm = i9xx_update_wm;
        dev_priv->display.get_fifo_size = i85x_get_fifo_size;
        dev_priv->display.init_clock_gating = i85x_init_clock_gating;
    } else {
        dev_priv->display.update_wm = i830_update_wm;
        dev_priv->display.init_clock_gating = i830_init_clock_gating;
        if (IS_845G(dev))
            dev_priv->display.get_fifo_size = i845_get_fifo_size;
        else
            dev_priv->display.get_fifo_size = i830_get_fifo_size;
    }

    /* Default just returns -ENODEV to indicate unsupported */
//    dev_priv->display.queue_flip = intel_default_queue_flip;

    /* Page-flip queueing is not wired up in this port. */
#if 0
    switch (INTEL_INFO(dev)->gen) {
    case 2:
        dev_priv->display.queue_flip = intel_gen2_queue_flip;
        break;

    case 3:
        dev_priv->display.queue_flip = intel_gen3_queue_flip;
        break;

    case 4:
    case 5:
        dev_priv->display.queue_flip = intel_gen4_queue_flip;
        break;

    case 6:
        dev_priv->display.queue_flip = intel_gen6_queue_flip;
        break;
    case 7:
        dev_priv->display.queue_flip = intel_gen7_queue_flip;
        break;
    }
#endif
}
7527
 
7528
/*
7529
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
7530
 * resume, or other times.  This quirk makes sure that's the case for
7531
 * affected systems.
7532
 */
7533
static void quirk_pipea_force (struct drm_device *dev)
7534
{
7535
    struct drm_i915_private *dev_priv = dev->dev_private;
7536
 
7537
    dev_priv->quirks |= QUIRK_PIPEA_FORCE;
7538
    DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
7539
}
7540
 
7541
/*
7542
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
7543
 */
7544
static void quirk_ssc_force_disable(struct drm_device *dev)
7545
{
7546
    struct drm_i915_private *dev_priv = dev->dev_private;
7547
    dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
7548
}
7549
 
7550
/*
 * One entry of the quirk table: a PCI device match (device ID plus
 * optional subsystem IDs) and the workaround hook to run when the
 * currently-probed GPU matches.
 */
struct intel_quirk {
    int device;            /* PCI device ID of the GPU */
    int subsystem_vendor;  /* PCI subsystem vendor ID, or PCI_ANY_ID wildcard */
    int subsystem_device;  /* PCI subsystem device ID, or PCI_ANY_ID wildcard */
    void (*hook)(struct drm_device *dev);  /* quirk to apply on a match */
};
7556
 
7557
/* Table of machine-specific workarounds, matched in intel_init_quirks(). */
struct intel_quirk intel_quirks[] = {
    /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
    { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
    /* HP Mini needs pipe A force quirk (LP: #322104) */
    { 0x27ae,0x103c, 0x361a, quirk_pipea_force },

    /* Thinkpad R31 needs pipe A force quirk */
    { 0x3577, 0x1014, 0x0505, quirk_pipea_force },
    /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
    { 0x2592, 0x1179, 0x0001, quirk_pipea_force },

    /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
    { 0x3577,  0x1014, 0x0513, quirk_pipea_force },
    /* ThinkPad X40 needs pipe A force quirk */
    /* NOTE(review): the X40 comment has no table entry here — entry appears
     * to have been dropped; verify against upstream intel_display.c. */

    /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
    { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

    /* 855 & before need to leave pipe A & dpll A up */
    { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
    { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

    /* Lenovo U160 cannot use SSC on LVDS */
    { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

    /* Sony Vaio Y cannot use SSC on LVDS */
    { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
};
7585
 
7586
static void intel_init_quirks(struct drm_device *dev)
7587
{
7588
    struct pci_dev *d = dev->pdev;
7589
    int i;
7590
 
7591
    for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
7592
        struct intel_quirk *q = &intel_quirks[i];
7593
 
7594
        if (d->device == q->device &&
7595
            (d->subsystem_vendor == q->subsystem_vendor ||
7596
             q->subsystem_vendor == PCI_ANY_ID) &&
7597
            (d->subsystem_device == q->subsystem_device ||
7598
             q->subsystem_device == PCI_ANY_ID))
7599
            q->hook(dev);
7600
    }
7601
}
7602
 
2330 Serge 7603
/* Disable the VGA plane that we never use */
7604
static void i915_disable_vga(struct drm_device *dev)
7605
{
7606
	struct drm_i915_private *dev_priv = dev->dev_private;
7607
	u8 sr1;
7608
	u32 vga_reg;
2327 Serge 7609
 
2330 Serge 7610
	if (HAS_PCH_SPLIT(dev))
7611
		vga_reg = CPU_VGACNTRL;
7612
	else
7613
		vga_reg = VGACNTRL;
7614
 
7615
//	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
7616
    out8(VGA_SR_INDEX, 1);
7617
    sr1 = in8(VGA_SR_DATA);
7618
    out8(VGA_SR_DATA,sr1 | 1<<5);
7619
//   vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
7620
	udelay(300);
7621
 
7622
	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
7623
	POSTING_READ(vga_reg);
7624
}
7625
 
2327 Serge 7626
void intel_modeset_init(struct drm_device *dev)
7627
{
7628
    struct drm_i915_private *dev_priv = dev->dev_private;
7629
    int i;
7630
 
7631
    drm_mode_config_init(dev);
7632
 
7633
    dev->mode_config.min_width = 0;
7634
    dev->mode_config.min_height = 0;
7635
 
7636
    dev->mode_config.funcs = (void *)&intel_mode_funcs;
7637
 
7638
    intel_init_quirks(dev);
7639
 
7640
    intel_init_display(dev);
7641
 
7642
    if (IS_GEN2(dev)) {
7643
        dev->mode_config.max_width = 2048;
7644
        dev->mode_config.max_height = 2048;
7645
    } else if (IS_GEN3(dev)) {
7646
        dev->mode_config.max_width = 4096;
7647
        dev->mode_config.max_height = 4096;
7648
    } else {
7649
        dev->mode_config.max_width = 8192;
7650
        dev->mode_config.max_height = 8192;
7651
    }
7652
    dev->mode_config.fb_base = get_bus_addr();
7653
 
7654
    DRM_DEBUG_KMS("%d display pipe%s available.\n",
7655
              dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
7656
 
7657
    for (i = 0; i < dev_priv->num_pipe; i++) {
7658
        intel_crtc_init(dev, i);
7659
    }
7660
 
7661
    /* Just disable it once at startup */
7662
    i915_disable_vga(dev);
7663
    intel_setup_outputs(dev);
7664
 
7665
    intel_init_clock_gating(dev);
7666
 
7667
    if (IS_IRONLAKE_M(dev)) {
7668
        ironlake_enable_drps(dev);
7669
        intel_init_emon(dev);
7670
    }
7671
 
7672
    if (IS_GEN6(dev) || IS_GEN7(dev)) {
7673
        gen6_enable_rps(dev_priv);
7674
        gen6_update_ring_freq(dev_priv);
7675
    }
7676
 
2332 Serge 7677
//   INIT_WORK(&dev_priv->idle_work, intel_idle_update);
7678
//   setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
7679
//           (unsigned long)dev);
2330 Serge 7680
}
2327 Serge 7681
 
2332 Serge 7682
/*
 * GEM-dependent part of modeset init, run after the GEM subsystem is up:
 * enables RC6 power saving on Ironlake mobile parts.
 */
void intel_modeset_gem_init(struct drm_device *dev)
{
	if (IS_IRONLAKE_M(dev))
		ironlake_enable_rc6(dev);

//	intel_setup_overlay(dev);
}
7689
 
7690
 
2330 Serge 7691
/*
7692
 * Return which encoder is currently attached for connector.
7693
 */
7694
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
7695
{
7696
	return &intel_attached_encoder(connector)->base;
2327 Serge 7697
}
7698
 
2330 Serge 7699
void intel_connector_attach_encoder(struct intel_connector *connector,
7700
				    struct intel_encoder *encoder)
7701
{
7702
	connector->encoder = encoder;
7703
	drm_mode_connector_attach_encoder(&connector->base,
7704
					  &encoder->base);
7705
}
2327 Serge 7706
 
2330 Serge 7707