Subversion Repositories Kolibri OS

Rev

Rev 2330 | Rev 2335 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2327 Serge 1
/*
2
 * Copyright © 2006-2007 Intel Corporation
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21
 * DEALINGS IN THE SOFTWARE.
22
 *
23
 * Authors:
24
 *  Eric Anholt 
25
 */
26
 
27
//#include 
28
#include 
29
//#include 
30
#include 
31
#include 
2330 Serge 32
#include 
2327 Serge 33
//#include 
34
#include "drmP.h"
35
#include "intel_drv.h"
2330 Serge 36
#include "i915_drm.h"
2327 Serge 37
#include "i915_drv.h"
38
//#include "i915_trace.h"
39
#include "drm_dp_helper.h"
40
 
41
#include "drm_crtc_helper.h"
42
 
43
phys_addr_t get_bus_addr(void);
44
 
45
/*
 * is_power_of_2 - test whether @n is an exact power of two.
 *
 * A power of two has exactly one bit set, so clearing the lowest set
 * bit with (n & (n - 1)) must yield zero; zero itself is excluded.
 */
static inline __attribute__((const))
bool is_power_of_2(unsigned long n)
{
    if (n == 0)
        return false;
    return (n & (n - 1)) == 0;
}
50
 
2330 Serge 51
/* Largest errno value that can be encoded in a pointer; mirrors the
 * Linux kernel ERR_PTR convention. */
#define MAX_ERRNO       4095

/* True when x falls in the top MAX_ERRNO values of the address space,
 * i.e. it encodes a negative errno rather than a real pointer. */
#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)

/* Non-zero when ptr carries an encoded error (see ERR_PTR below). */
static inline long IS_ERR(const void *ptr)
{
    return IS_ERR_VALUE((unsigned long)ptr);
}

/* Encode a negative errno value as a pointer. */
static inline void *ERR_PTR(long error)
{
    return (void *) error;
}
64
 
65
 
2327 Serge 66
/* Minimal pci_read_config_word() shim for this port: reads a 16-bit
 * value from the device's PCI config space through PciRead16().
 * NOTE(review): always returns 1, whereas the Linux API returns 0 on
 * success — callers in this file appear not to check it; confirm. */
static inline int pci_read_config_word(struct pci_dev *dev, int where,
                    u16 *val)
{
    *val = PciRead16(dev->busnr, dev->devfn, where);
    return 1;
}
72
 
73
 
74
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
75
 
76
bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
77
static void intel_update_watermarks(struct drm_device *dev);
78
static void intel_increase_pllclock(struct drm_crtc *crtc);
79
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
80
 
81
/* One PLL configuration: the raw divisor values plus the frequencies
 * derived from them by intel_clock()/pineview_clock(). */
typedef struct {
    /* given values */
    int n;
    int m1, m2;
    int p1, p2;
    /* derived values */
    int dot;
    int vco;
    int m;
    int p;
} intel_clock_t;

/* Inclusive [min, max] range for a single divisor field. */
typedef struct {
    int min, max;
} intel_range_t;

/* Post-divider (p2) selection: p2_slow below dot_limit, p2_fast at or
 * above it; LVDS paths instead pick slow/fast by channel mode. */
typedef struct {
    int dot_limit;
    int p2_slow, p2_fast;
} intel_p2_t;

#define INTEL_P2_NUM              2
/* Divisor limits for one platform/output combination, together with
 * the search routine used to find a PLL setting within them. */
typedef struct intel_limit intel_limit_t;
struct intel_limit {
    intel_range_t   dot, vco, n, m, m1, m2, p, p1;
    intel_p2_t      p2;
    bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
              int, int, intel_clock_t *);
};
110
 
111
/* FDI */
112
#define IRONLAKE_FDI_FREQ       2700000 /* in kHz for mode->clock */
113
 
114
static bool
115
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
116
            int target, int refclk, intel_clock_t *best_clock);
117
static bool
118
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
119
            int target, int refclk, intel_clock_t *best_clock);
120
 
121
static bool
122
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
123
              int target, int refclk, intel_clock_t *best_clock);
124
static bool
125
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
126
               int target, int refclk, intel_clock_t *best_clock);
127
 
128
static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
	/* On gen5 (Ironlake) the FDI link frequency comes from the
	 * BIOS-programmed FDI PLL register (+2 per the hardware
	 * encoding); later generations use a fixed 2.7 GHz link. */
	if (IS_GEN5(dev)) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
	} else
		return 27;
}
137
 
138
/* PLL divisor limit tables for i8xx/i9xx, per output type. */
static const intel_limit_t intel_limits_i8xx_dvo = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 930000, .max = 1400000 },
        .n = { .min = 3, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i8xx_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 930000, .max = 1400000 },
        .n = { .min = 3, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i9xx_sdvo = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 10, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i9xx_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 10, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 7, .max = 98 },
        .p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};
193
 
194
 
195
/* PLL divisor limit tables for G4x, per output type. */
static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_display_port = {
        .dot = { .min = 161670, .max = 227000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 2 },
        .m = { .min = 97, .max = 108 },
        .m1 = { .min = 0x10, .max = 0x12 },
        .m2 = { .min = 0x05, .max = 0x06 },
        .p = { .min = 10, .max = 20 },
        .p1 = { .min = 1, .max = 2},
        .p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
        .find_pll = intel_find_pll_g4x_dp,
};
268
 
269
/* PLL divisor limit tables for Pineview (single combined m divider). */
static const intel_limit_t intel_limits_pineview_sdvo = {
        .dot = { .min = 20000, .max = 400000},
        .vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_pineview_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1700000, .max = 3500000 },
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 7, .max = 112 },
        .p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_find_best_PLL,
};
298
 
299
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2,.max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2,.max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_display_port = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000},
        .n = { .min = 1, .max = 2 },
        .m = { .min = 81, .max = 90 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 10, .max = 20 },
        .p1 = { .min = 1, .max = 2},
        .p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
        .find_pll = intel_find_pll_ironlake_dp,
};
388
 
389
/* Select the PLL limit table for an Ironlake/Sandybridge CRTC, based
 * on the attached output type and, for LVDS, the channel mode and the
 * reference clock (100 MHz refclk has its own tables). */
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
						int refclk)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		/* Clock channel B powered up => dual-channel panel. */
		if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP) {
			/* LVDS dual channel */
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			/* LVDS single channel */
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
			HAS_eDP)
		limit = &intel_limits_ironlake_display_port;
	else
		limit = &intel_limits_ironlake_dac;

	return limit;
}
418
 
419
/* Select the PLL limit table for a G4x CRTC by output type. */
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			/* LVDS with dual channel */
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			/* LVDS with single channel (the original comment
			 * here wrongly said "dual channel") */
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else if (intel_pipe_has_type (crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		limit = &intel_limits_g4x_display_port;
	} else /* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;

	return limit;
}
445
 
446
/* Dispatch to the right per-generation limit table for this CRTC. */
static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (HAS_PCH_SPLIT(dev))
		limit = intel_ironlake_limit(crtc, refclk);
	else if (IS_G4X(dev)) {
		limit = intel_g4x_limit(crtc);
	} else if (IS_PINEVIEW(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_pineview_lvds;
		else
			limit = &intel_limits_pineview_sdvo;
	} else if (!IS_GEN2(dev)) {
		/* gen3/gen4 without the features above */
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i9xx_lvds;
		else
			limit = &intel_limits_i9xx_sdvo;
	} else {
		/* gen2 (i8xx) */
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i8xx_lvds;
		else
			limit = &intel_limits_i8xx_dvo;
	}
	return limit;
}
473
 
474
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
	/* Derive m/p/vco/dot from the raw dividers; Pineview keeps its
	 * single combined m divider in m2 (+2 per hardware encoding). */
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / clock->n;
	clock->dot = clock->vco / clock->p;
}
482
 
483
/* Compute the derived clock values (m, p, vco, dot) from the raw
 * divisors using the i9xx register encoding (+2 offsets on m1/m2/n);
 * Pineview has its own encoding, handled by pineview_clock(). */
static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
{
	if (IS_PINEVIEW(dev)) {
		pineview_clock(refclk, clock);
		return;
	}
	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / (clock->n + 2);
	clock->dot = clock->vco / clock->p;
}
494
 
495
/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	/* Walk all encoders and test only those bound to this CRTC. */
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->base.crtc == crtc && encoder->type == type)
			return true;

	return false;
}
510
 
511
/* Bail out of intel_PLL_is_valid() with false; the debug print is
 * compiled out in this port. */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	/* Range-check every divisor and derived value against @limit. */
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid ("p1 out of range\n");
	if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
		INTELPllInvalid ("p out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid ("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid ("m1 out of range\n");
	/* m1 must exceed m2 everywhere except Pineview, where m1 is 0. */
	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
		INTELPllInvalid ("m1 <= m2\n");
	if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
		INTELPllInvalid ("m out of range\n");
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid ("n out of range\n");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid ("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid ("dot out of range\n");

	return true;
}
545
 
546
/*
 * Exhaustively search the divisor ranges in @limit for the combination
 * whose dot clock is closest to @target (kHz). Returns true and fills
 * @best_clock if any valid combination was found.
 */
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *best_clock)

{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	/* Seed the best error with target itself: any valid candidate
	 * strictly improves on it. */
	int err = target;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (I915_READ(LVDS)) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel.  We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		/* Non-LVDS: pick p2 by the dot-clock threshold. */
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset (best_clock, 0, sizeof (*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 is always 0 in Pineview */
			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					/* Keep the candidate closest to target. */
					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
608
 
609
/*
 * G4x/ILK variant of the PLL search: iterates the divisor ranges in the
 * hardware-preferred direction (small n, large m1/m2/p1) and accepts a
 * candidate once its error drops below ~0.585% of @target.
 */
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		int lvds_reg;

		/* LVDS register lives in the PCH on ILK+. */
		if (HAS_PCH_SPLIT(dev))
			lvds_reg = PCH_LVDS;
		else
			lvds_reg = LVDS;
		if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefere larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* Shrinking max_n cuts off
						 * larger-n candidates once a
						 * hit is found. */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
672
 
673
/*
 * DisplayPort on Ironlake: no search needed — pick one of two fixed
 * divisor sets depending on whether the link target is below 200 MHz,
 * then derive the remaining clock values. Always succeeds.
 */
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;

	if (target < 200000) {
		clock.n = 1;
		clock.p1 = 2;
		clock.p2 = 10;
		clock.m1 = 12;
		clock.m2 = 9;
	} else {
		clock.n = 2;
		clock.p1 = 1;
		clock.p2 = 10;
		clock.m1 = 14;
		clock.m2 = 8;
	}
	intel_clock(dev, refclk, &clock);
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}
697
 
698
/* DisplayPort has only two frequencies, 162MHz and 270MHz */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *best_clock)
{
	intel_clock_t clock;
	/* Fixed divisor sets for the two DP link rates; the 200 MHz
	 * threshold separates 162 MHz from 270 MHz. */
	if (target < 200000) {
		clock.p1 = 2;
		clock.p2 = 10;
		clock.n = 2;
		clock.m1 = 23;
		clock.m2 = 8;
	} else {
		clock.p1 = 1;
		clock.p2 = 10;
		clock.n = 1;
		clock.m1 = 14;
		clock.m2 = 2;
	}
	/* Derive m/p/dot inline (96 MHz refclk, i9xx +2 encoding);
	 * vco is not used for DP and left at zero. */
	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
	clock.p = (clock.p1 * clock.p2);
	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
	clock.vco = 0;
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}
724
 
725
/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
 * mode setting code.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipestat_reg = PIPESTAT(pipe);

	/* Clear existing vblank status. Note this will clear any other
	 * sticky status fields as well.
	 *
	 * This races with i915_driver_irq_handler() with the result
	 * that either function could miss a vblank event.  Here it is not
	 * fatal, as we will either wait upon the next vblank interrupt or
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
	 * called during modeset at which time the GPU should be idle and
	 * should *not* be performing page flips and thus not waiting on
	 * vblanks...
	 * Currently, the result of us stealing a vblank from the irq
	 * handler is that a single frame will be skipped during swapbuffers.
	 */
	I915_WRITE(pipestat_reg,
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

	/* Wait for vblank interrupt bit to set (50 ms timeout). */
	if (wait_for(I915_READ(pipestat_reg) &
		     PIPE_VBLANK_INTERRUPT_STATUS,
		     50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}
760
 
761
/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(pipe);

		/* Wait for the Pipe State to go off (100 ms timeout) */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	} else {
		u32 last_line;
		int reg = PIPEDSL(pipe);
		unsigned long timeout = jiffies + msecs_to_jiffies(100);

		/* Wait for the display line to settle: poll the scanline
		 * register every 5 ms until it stops changing or the
		 * 100 ms deadline passes. */
		do {
			last_line = I915_READ(reg) & DSL_LINEMASK;
			mdelay(5);
		} while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
			 time_after(timeout, jiffies));
		if (time_after(jiffies, timeout))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	}
}
804
 
805
/* Human-readable form of an enable bit, used in assertion messages. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";
	return "off";
}
809
 
810
/* Only for pre-ILK configs */
/* Warn if the DPLL for @pipe is not in the expected on/off @state. */
static void assert_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)
827
 
828
/* For ILK+ */
/* Warn if the PCH DPLL for @pipe is not in the expected @state. */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
			   enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PCH PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
845
 
846
/* Warn if the FDI transmitter for @pipe is not in the expected @state. */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_TX_ENABLE);
	WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
862
 
863
/* Warn if the FDI receiver for @pipe is not in the expected @state. */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_RX_ENABLE);
	WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
879
 
880
/* Warn if the FDI TX PLL for @pipe is disabled (no-op on gen5, where
 * the PLL is always on). */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (dev_priv->info->gen == 5)
		return;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
894
 
895
/* Warn if the FDI RX PLL for @pipe is disabled. */
static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
}
905
 
906
/* Warn if the panel power sequencer registers feeding @pipe are still
 * locked while the panel is powered on. */
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	int pp_reg, lvds_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	/* Panel power / LVDS registers live in the PCH on ILK+. */
	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		pp_reg = PCH_PP_CONTROL;
		lvds_reg = PCH_LVDS;
	} else {
		pp_reg = PP_CONTROL;
		lvds_reg = LVDS;
	}

	/* Unlocked when the panel is off or the unlock key is present. */
	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
		locked = false;

	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
		panel_pipe = PIPE_B;

	WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
934
 
935
/* Warn if @pipe is not in the expected enabled/disabled @state. */
static void assert_pipe(struct drm_i915_private *dev_priv,
			enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & PIPECONF_ENABLE);
	WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
951
 
952
static void assert_plane_enabled(struct drm_i915_private *dev_priv,
953
				 enum plane plane)
954
{
955
	int reg;
956
	u32 val;
957
 
958
	reg = DSPCNTR(plane);
959
	val = I915_READ(reg);
960
	WARN(!(val & DISPLAY_PLANE_ENABLE),
961
	     "plane %c assertion failure, should be active but is disabled\n",
962
	     plane_name(plane));
963
}
964
 
965
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
966
				   enum pipe pipe)
967
{
968
	int reg, i;
969
	u32 val;
970
	int cur_pipe;
971
 
972
	/* Planes are fixed to pipes on ILK+ */
973
	if (HAS_PCH_SPLIT(dev_priv->dev))
974
		return;
975
 
976
	/* Need to check both planes against the pipe */
977
	for (i = 0; i < 2; i++) {
978
		reg = DSPCNTR(i);
979
		val = I915_READ(reg);
980
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
981
			DISPPLANE_SEL_PIPE_SHIFT;
982
		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
983
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
984
		     plane_name(i), pipe_name(pipe));
985
	}
986
}
987
 
988
static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
989
{
990
	u32 val;
991
	bool enabled;
992
 
993
	val = I915_READ(PCH_DREF_CONTROL);
994
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
995
			    DREF_SUPERSPREAD_SOURCE_MASK));
996
	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
997
}
998
 
999
static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
1000
				       enum pipe pipe)
1001
{
1002
	int reg;
1003
	u32 val;
1004
	bool enabled;
1005
 
1006
	reg = TRANSCONF(pipe);
1007
	val = I915_READ(reg);
1008
	enabled = !!(val & TRANS_ENABLE);
1009
	WARN(enabled,
1010
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
1011
	     pipe_name(pipe));
1012
}
1013
 
1014
/*
 * Return true when the DP port with control value @val is enabled and
 * routed to @pipe.  On CPT the routing lives in the per-transcoder DP
 * control register; elsewhere it is encoded in the port register itself.
 */
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum pipe pipe, u32 port_sel, u32 val)
{
	if (!(val & DP_PORT_EN))
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32 trans_dp = I915_READ(TRANS_DP_CTL(pipe));
		return (trans_dp & TRANS_DP_PORT_SEL_MASK) == port_sel;
	}

	return (val & DP_PIPE_MASK) == (pipe << 30);
}
1031
 
1032
/*
 * Return true when the HDMI/SDVO port with control value @val is enabled
 * and feeding @pipe (transcoder select differs between CPT and IBX).
 */
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if (!(val & PORT_ENABLE))
		return false;

	if (HAS_PCH_CPT(dev_priv->dev))
		return (val & PORT_TRANS_SEL_MASK) == PORT_TRANS_SEL_CPT(pipe);

	return (val & TRANSCODER_MASK) == TRANSCODER(pipe);
}
1047
 
1048
/*
 * Return true when the LVDS port with control value @val is enabled and
 * feeding @pipe.
 */
static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if (!(val & LVDS_PORT_EN))
		return false;

	if (HAS_PCH_CPT(dev_priv->dev))
		return (val & PORT_TRANS_SEL_MASK) == PORT_TRANS_SEL_CPT(pipe);

	return (val & LVDS_PIPE_MASK) == LVDS_PIPE(pipe);
}
1063
 
1064
/*
 * Return true when the analog (ADPA/VGA) port with control value @val is
 * enabled and feeding @pipe.
 */
static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if (!(val & ADPA_DAC_ENABLE))
		return false;

	if (HAS_PCH_CPT(dev_priv->dev))
		return (val & PORT_TRANS_SEL_MASK) == PORT_TRANS_SEL_CPT(pipe);

	return (val & ADPA_PIPE_SELECT_MASK) == ADPA_PIPE_SELECT(pipe);
}
1078
 
1079
/* Warn if the PCH DP port at @reg is still driving transcoder @pipe. */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);

	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));
}
1087
 
1088
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1089
				     enum pipe pipe, int reg)
1090
{
1091
	u32 val = I915_READ(reg);
1092
	WARN(hdmi_pipe_enabled(dev_priv, val, pipe),
1093
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1094
	     reg, pipe_name(pipe));
1095
}
1096
 
1097
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1098
				      enum pipe pipe)
1099
{
1100
	int reg;
1101
	u32 val;
1102
 
1103
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1104
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1105
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1106
 
1107
	reg = PCH_ADPA;
1108
	val = I915_READ(reg);
1109
	WARN(adpa_pipe_enabled(dev_priv, val, pipe),
1110
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
1111
	     pipe_name(pipe));
1112
 
1113
	reg = PCH_LVDS;
1114
	val = I915_READ(reg);
1115
	WARN(lvds_pipe_enabled(dev_priv, val, pipe),
1116
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
1117
	     pipe_name(pipe));
1118
 
1119
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
1120
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
1121
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
1122
}
1123
 
1124
/**
1125
 * intel_enable_pll - enable a PLL
1126
 * @dev_priv: i915 private structure
1127
 * @pipe: pipe PLL to enable
1128
 *
1129
 * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
1130
 * make sure the PLL reg is writable first though, since the panel write
1131
 * protect mechanism may be enabled.
1132
 *
1133
 * Note!  This is for pre-ILK only.
1134
 */
1135
static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1136
{
1137
    int reg;
1138
    u32 val;
1139
 
1140
    /* No really, not for ILK+ */
1141
    BUG_ON(dev_priv->info->gen >= 5);
1142
 
1143
    /* PLL is protected by panel, make sure we can write it */
1144
    if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
1145
        assert_panel_unlocked(dev_priv, pipe);
1146
 
1147
    reg = DPLL(pipe);
1148
    val = I915_READ(reg);
1149
    val |= DPLL_VCO_ENABLE;
1150
 
1151
    /* We do this three times for luck */
1152
    I915_WRITE(reg, val);
1153
    POSTING_READ(reg);
1154
    udelay(150); /* wait for warmup */
1155
    I915_WRITE(reg, val);
1156
    POSTING_READ(reg);
1157
    udelay(150); /* wait for warmup */
1158
    I915_WRITE(reg, val);
1159
    POSTING_READ(reg);
1160
    udelay(150); /* wait for warmup */
1161
}
1162
 
1163
/**
1164
 * intel_disable_pll - disable a PLL
1165
 * @dev_priv: i915 private structure
1166
 * @pipe: pipe PLL to disable
1167
 *
1168
 * Disable the PLL for @pipe, making sure the pipe is off first.
1169
 *
1170
 * Note!  This is for pre-ILK only.
1171
 */
1172
static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1173
{
1174
	int reg;
1175
	u32 val;
1176
 
1177
	/* Don't disable pipe A or pipe A PLLs if needed */
1178
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1179
		return;
1180
 
1181
	/* Make sure the pipe isn't still relying on us */
1182
	assert_pipe_disabled(dev_priv, pipe);
1183
 
1184
	reg = DPLL(pipe);
1185
	val = I915_READ(reg);
1186
	val &= ~DPLL_VCO_ENABLE;
1187
	I915_WRITE(reg, val);
1188
	POSTING_READ(reg);
1189
}
1190
 
1191
/**
1192
 * intel_enable_pch_pll - enable PCH PLL
1193
 * @dev_priv: i915 private structure
1194
 * @pipe: pipe PLL to enable
1195
 *
1196
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
1197
 * drives the transcoder clock.
1198
 */
1199
static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
1200
				 enum pipe pipe)
1201
{
1202
	int reg;
1203
	u32 val;
1204
 
1205
	/* PCH only available on ILK+ */
1206
	BUG_ON(dev_priv->info->gen < 5);
1207
 
1208
	/* PCH refclock must be enabled first */
1209
	assert_pch_refclk_enabled(dev_priv);
1210
 
1211
	reg = PCH_DPLL(pipe);
1212
	val = I915_READ(reg);
1213
	val |= DPLL_VCO_ENABLE;
1214
	I915_WRITE(reg, val);
1215
	POSTING_READ(reg);
1216
	udelay(200);
1217
}
1218
 
1219
static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
1220
				  enum pipe pipe)
1221
{
1222
	int reg;
1223
	u32 val;
1224
 
1225
	/* PCH only available on ILK+ */
1226
	BUG_ON(dev_priv->info->gen < 5);
1227
 
1228
	/* Make sure transcoder isn't still depending on us */
1229
	assert_transcoder_disabled(dev_priv, pipe);
1230
 
1231
	reg = PCH_DPLL(pipe);
1232
	val = I915_READ(reg);
1233
	val &= ~DPLL_VCO_ENABLE;
1234
	I915_WRITE(reg, val);
1235
	POSTING_READ(reg);
1236
	udelay(200);
1237
}
1238
 
1239
static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1240
				    enum pipe pipe)
1241
{
1242
	int reg;
1243
	u32 val;
1244
 
1245
	/* PCH only available on ILK+ */
1246
	BUG_ON(dev_priv->info->gen < 5);
1247
 
1248
	/* Make sure PCH DPLL is enabled */
1249
	assert_pch_pll_enabled(dev_priv, pipe);
1250
 
1251
	/* FDI must be feeding us bits for PCH ports */
1252
	assert_fdi_tx_enabled(dev_priv, pipe);
1253
	assert_fdi_rx_enabled(dev_priv, pipe);
1254
 
1255
	reg = TRANSCONF(pipe);
1256
	val = I915_READ(reg);
1257
 
1258
	if (HAS_PCH_IBX(dev_priv->dev)) {
1259
		/*
1260
		 * make the BPC in transcoder be consistent with
1261
		 * that in pipeconf reg.
1262
		 */
1263
		val &= ~PIPE_BPC_MASK;
1264
		val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
1265
	}
1266
	I915_WRITE(reg, val | TRANS_ENABLE);
1267
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1268
		DRM_ERROR("failed to enable transcoder %d\n", pipe);
1269
}
1270
 
1271
static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
1272
				     enum pipe pipe)
1273
{
1274
	int reg;
1275
	u32 val;
1276
 
1277
	/* FDI relies on the transcoder */
1278
	assert_fdi_tx_disabled(dev_priv, pipe);
1279
	assert_fdi_rx_disabled(dev_priv, pipe);
1280
 
1281
	/* Ports must be off as well */
1282
	assert_pch_ports_disabled(dev_priv, pipe);
1283
 
1284
	reg = TRANSCONF(pipe);
1285
	val = I915_READ(reg);
1286
	val &= ~TRANS_ENABLE;
1287
	I915_WRITE(reg, val);
1288
	/* wait for PCH transcoder off, transcoder state */
1289
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1290
		DRM_ERROR("failed to disable transcoder\n");
1291
}
1292
 
1293
/**
1294
 * intel_enable_pipe - enable a pipe, asserting requirements
1295
 * @dev_priv: i915 private structure
1296
 * @pipe: pipe to enable
1297
 * @pch_port: on ILK+, is this pipe driving a PCH port or not
1298
 *
1299
 * Enable @pipe, making sure that various hardware specific requirements
1300
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1301
 *
1302
 * @pipe should be %PIPE_A or %PIPE_B.
1303
 *
1304
 * Will wait until the pipe is actually running (i.e. first vblank) before
1305
 * returning.
1306
 */
1307
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1308
			      bool pch_port)
1309
{
1310
	int reg;
1311
	u32 val;
1312
 
1313
	/*
1314
	 * A pipe without a PLL won't actually be able to drive bits from
1315
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1316
	 * need the check.
1317
	 */
1318
	if (!HAS_PCH_SPLIT(dev_priv->dev))
1319
		assert_pll_enabled(dev_priv, pipe);
1320
	else {
1321
		if (pch_port) {
1322
			/* if driving the PCH, we need FDI enabled */
1323
			assert_fdi_rx_pll_enabled(dev_priv, pipe);
1324
			assert_fdi_tx_pll_enabled(dev_priv, pipe);
1325
		}
1326
		/* FIXME: assert CPU port conditions for SNB+ */
1327
	}
1328
 
1329
	reg = PIPECONF(pipe);
1330
	val = I915_READ(reg);
1331
	if (val & PIPECONF_ENABLE)
1332
		return;
1333
 
1334
	I915_WRITE(reg, val | PIPECONF_ENABLE);
1335
	intel_wait_for_vblank(dev_priv->dev, pipe);
1336
}
1337
 
1338
/**
1339
 * intel_disable_pipe - disable a pipe, asserting requirements
1340
 * @dev_priv: i915 private structure
1341
 * @pipe: pipe to disable
1342
 *
1343
 * Disable @pipe, making sure that various hardware specific requirements
1344
 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1345
 *
1346
 * @pipe should be %PIPE_A or %PIPE_B.
1347
 *
1348
 * Will wait until the pipe has shut down before returning.
1349
 */
1350
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1351
			       enum pipe pipe)
1352
{
1353
	int reg;
1354
	u32 val;
1355
 
1356
	/*
1357
	 * Make sure planes won't keep trying to pump pixels to us,
1358
	 * or we might hang the display.
1359
	 */
1360
	assert_planes_disabled(dev_priv, pipe);
1361
 
1362
	/* Don't disable pipe A or pipe A PLLs if needed */
1363
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1364
		return;
1365
 
1366
	reg = PIPECONF(pipe);
1367
	val = I915_READ(reg);
1368
	if ((val & PIPECONF_ENABLE) == 0)
1369
		return;
1370
 
1371
	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
1372
	intel_wait_for_pipe_off(dev_priv->dev, pipe);
1373
}
1374
 
1375
/*
1376
 * Plane regs are double buffered, going from enabled->disabled needs a
1377
 * trigger in order to latch.  The display address reg provides this.
1378
 */
1379
static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
1380
				      enum plane plane)
1381
{
1382
	I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1383
	I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1384
}
1385
 
1386
/**
1387
 * intel_enable_plane - enable a display plane on a given pipe
1388
 * @dev_priv: i915 private structure
1389
 * @plane: plane to enable
1390
 * @pipe: pipe being fed
1391
 *
1392
 * Enable @plane on @pipe, making sure that @pipe is running first.
1393
 */
1394
static void intel_enable_plane(struct drm_i915_private *dev_priv,
1395
			       enum plane plane, enum pipe pipe)
1396
{
1397
	int reg;
1398
	u32 val;
1399
 
1400
	/* If the pipe isn't enabled, we can't pump pixels and may hang */
1401
	assert_pipe_enabled(dev_priv, pipe);
1402
 
1403
	reg = DSPCNTR(plane);
1404
	val = I915_READ(reg);
1405
	if (val & DISPLAY_PLANE_ENABLE)
1406
		return;
1407
 
1408
	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
1409
	intel_flush_display_plane(dev_priv, plane);
1410
	intel_wait_for_vblank(dev_priv->dev, pipe);
1411
}
1412
 
1413
/**
1414
 * intel_disable_plane - disable a display plane
1415
 * @dev_priv: i915 private structure
1416
 * @plane: plane to disable
1417
 * @pipe: pipe consuming the data
1418
 *
1419
 * Disable @plane; should be an independent operation.
1420
 */
1421
static void intel_disable_plane(struct drm_i915_private *dev_priv,
1422
				enum plane plane, enum pipe pipe)
1423
{
1424
	int reg;
1425
	u32 val;
1426
 
1427
	reg = DSPCNTR(plane);
1428
	val = I915_READ(reg);
1429
	if ((val & DISPLAY_PLANE_ENABLE) == 0)
1430
		return;
1431
 
1432
	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
1433
	intel_flush_display_plane(dev_priv, plane);
1434
	intel_wait_for_vblank(dev_priv->dev, pipe);
1435
}
1436
 
1437
/* Turn off the PCH DP port at @reg if it is driving transcoder @pipe. */
static void disable_pch_dp(struct drm_i915_private *dev_priv,
			   enum pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);

	if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
		DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
		I915_WRITE(reg, val & ~DP_PORT_EN);
	}
}
1446
 
1447
static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1448
			     enum pipe pipe, int reg)
1449
{
1450
	u32 val = I915_READ(reg);
1451
	if (hdmi_pipe_enabled(dev_priv, val, pipe)) {
1452
		DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
1453
			      reg, pipe);
1454
		I915_WRITE(reg, val & ~PORT_ENABLE);
1455
	}
1456
}
1457
 
1458
/* Disable any ports connected to this transcoder */
1459
static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1460
				    enum pipe pipe)
1461
{
1462
	u32 reg, val;
1463
 
1464
	val = I915_READ(PCH_PP_CONTROL);
1465
	I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
1466
 
1467
	disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1468
	disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1469
	disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1470
 
1471
	reg = PCH_ADPA;
1472
	val = I915_READ(reg);
1473
	if (adpa_pipe_enabled(dev_priv, val, pipe))
1474
		I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
1475
 
1476
	reg = PCH_LVDS;
1477
	val = I915_READ(reg);
1478
	if (lvds_pipe_enabled(dev_priv, val, pipe)) {
1479
		DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
1480
		I915_WRITE(reg, val & ~LVDS_PORT_EN);
1481
		POSTING_READ(reg);
1482
		udelay(100);
1483
	}
1484
 
1485
	disable_pch_hdmi(dev_priv, pipe, HDMIB);
1486
	disable_pch_hdmi(dev_priv, pipe, HDMIC);
1487
	disable_pch_hdmi(dev_priv, pipe, HDMID);
1488
}
1489
 
1490
static void i8xx_disable_fbc(struct drm_device *dev)
1491
{
1492
    struct drm_i915_private *dev_priv = dev->dev_private;
1493
    u32 fbc_ctl;
1494
 
1495
    /* Disable compression */
1496
    fbc_ctl = I915_READ(FBC_CONTROL);
1497
    if ((fbc_ctl & FBC_CTL_EN) == 0)
1498
        return;
1499
 
1500
    fbc_ctl &= ~FBC_CTL_EN;
1501
    I915_WRITE(FBC_CONTROL, fbc_ctl);
1502
 
1503
    /* Wait for compressing bit to clear */
1504
    if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
1505
        DRM_DEBUG_KMS("FBC idle timed out\n");
1506
        return;
1507
    }
1508
 
1509
    DRM_DEBUG_KMS("disabled FBC\n");
1510
}
1511
 
1512
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1513
{
1514
    struct drm_device *dev = crtc->dev;
1515
    struct drm_i915_private *dev_priv = dev->dev_private;
1516
    struct drm_framebuffer *fb = crtc->fb;
1517
    struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1518
    struct drm_i915_gem_object *obj = intel_fb->obj;
1519
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1520
    int cfb_pitch;
1521
    int plane, i;
1522
    u32 fbc_ctl, fbc_ctl2;
1523
 
1524
    cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
1525
    if (fb->pitch < cfb_pitch)
1526
        cfb_pitch = fb->pitch;
1527
 
1528
    /* FBC_CTL wants 64B units */
1529
    cfb_pitch = (cfb_pitch / 64) - 1;
1530
    plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
1531
 
1532
    /* Clear old tags */
1533
    for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
1534
        I915_WRITE(FBC_TAG + (i * 4), 0);
1535
 
1536
    /* Set it up... */
1537
    fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
1538
    fbc_ctl2 |= plane;
1539
    I915_WRITE(FBC_CONTROL2, fbc_ctl2);
1540
    I915_WRITE(FBC_FENCE_OFF, crtc->y);
1541
 
1542
    /* enable it... */
1543
    fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
1544
    if (IS_I945GM(dev))
1545
        fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
1546
    fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
1547
    fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
1548
    fbc_ctl |= obj->fence_reg;
1549
    I915_WRITE(FBC_CONTROL, fbc_ctl);
1550
 
1551
    DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
1552
              cfb_pitch, crtc->y, intel_crtc->plane);
1553
}
1554
 
1555
static bool i8xx_fbc_enabled(struct drm_device *dev)
1556
{
1557
    struct drm_i915_private *dev_priv = dev->dev_private;
1558
 
1559
    return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
1560
}
1561
 
1562
static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1563
{
1564
    struct drm_device *dev = crtc->dev;
1565
    struct drm_i915_private *dev_priv = dev->dev_private;
1566
    struct drm_framebuffer *fb = crtc->fb;
1567
    struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1568
    struct drm_i915_gem_object *obj = intel_fb->obj;
1569
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1570
    int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1571
    unsigned long stall_watermark = 200;
1572
    u32 dpfc_ctl;
1573
 
1574
    dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
1575
    dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
1576
    I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
1577
 
1578
    I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1579
           (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1580
           (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1581
    I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
1582
 
1583
    /* enable it... */
1584
    I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
1585
 
1586
    DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1587
}
1588
 
1589
static void g4x_disable_fbc(struct drm_device *dev)
1590
{
1591
    struct drm_i915_private *dev_priv = dev->dev_private;
1592
    u32 dpfc_ctl;
1593
 
1594
    /* Disable compression */
1595
    dpfc_ctl = I915_READ(DPFC_CONTROL);
1596
    if (dpfc_ctl & DPFC_CTL_EN) {
1597
        dpfc_ctl &= ~DPFC_CTL_EN;
1598
        I915_WRITE(DPFC_CONTROL, dpfc_ctl);
1599
 
1600
        DRM_DEBUG_KMS("disabled FBC\n");
1601
    }
1602
}
1603
 
1604
static bool g4x_fbc_enabled(struct drm_device *dev)
1605
{
1606
    struct drm_i915_private *dev_priv = dev->dev_private;
1607
 
1608
    return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
1609
}
1610
 
1611
static void sandybridge_blit_fbc_update(struct drm_device *dev)
1612
{
1613
	struct drm_i915_private *dev_priv = dev->dev_private;
1614
	u32 blt_ecoskpd;
1615
 
1616
	/* Make sure blitter notifies FBC of writes */
1617
	gen6_gt_force_wake_get(dev_priv);
1618
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
1619
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
1620
		GEN6_BLITTER_LOCK_SHIFT;
1621
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1622
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
1623
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1624
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
1625
			 GEN6_BLITTER_LOCK_SHIFT);
1626
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1627
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
1628
	gen6_gt_force_wake_put(dev_priv);
1629
}
1630
 
1631
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1632
{
1633
    struct drm_device *dev = crtc->dev;
1634
    struct drm_i915_private *dev_priv = dev->dev_private;
1635
    struct drm_framebuffer *fb = crtc->fb;
1636
    struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1637
    struct drm_i915_gem_object *obj = intel_fb->obj;
1638
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1639
    int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1640
    unsigned long stall_watermark = 200;
1641
    u32 dpfc_ctl;
1642
 
1643
    dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1644
    dpfc_ctl &= DPFC_RESERVED;
1645
    dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
1646
    /* Set persistent mode for front-buffer rendering, ala X. */
1647
    dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
1648
    dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
1649
    I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
1650
 
1651
    I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1652
           (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1653
           (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1654
    I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
1655
    I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
1656
    /* enable it... */
1657
    I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
1658
 
1659
    if (IS_GEN6(dev)) {
1660
        I915_WRITE(SNB_DPFC_CTL_SA,
1661
               SNB_CPU_FENCE_ENABLE | obj->fence_reg);
1662
        I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
1663
        sandybridge_blit_fbc_update(dev);
1664
    }
1665
 
1666
    DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1667
}
1668
 
1669
static void ironlake_disable_fbc(struct drm_device *dev)
1670
{
1671
    struct drm_i915_private *dev_priv = dev->dev_private;
1672
    u32 dpfc_ctl;
1673
 
1674
    /* Disable compression */
1675
    dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1676
    if (dpfc_ctl & DPFC_CTL_EN) {
1677
        dpfc_ctl &= ~DPFC_CTL_EN;
1678
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
1679
 
1680
        DRM_DEBUG_KMS("disabled FBC\n");
1681
    }
1682
}
1683
 
1684
static bool ironlake_fbc_enabled(struct drm_device *dev)
1685
{
1686
    struct drm_i915_private *dev_priv = dev->dev_private;
1687
 
1688
    return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
1689
}
1690
 
1691
bool intel_fbc_enabled(struct drm_device *dev)
1692
{
1693
	struct drm_i915_private *dev_priv = dev->dev_private;
1694
 
1695
	if (!dev_priv->display.fbc_enabled)
1696
		return false;
1697
 
1698
	return dev_priv->display.fbc_enabled(dev);
1699
}
1700
 
1701
 
1702
 
1703
 
1704
 
1705
 
1706
 
1707
 
1708
 
1709
 
1710
static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1711
{
1712
	struct intel_fbc_work *work;
1713
	struct drm_device *dev = crtc->dev;
1714
	struct drm_i915_private *dev_priv = dev->dev_private;
1715
 
1716
	if (!dev_priv->display.enable_fbc)
1717
		return;
1718
 
1719
//	intel_cancel_fbc_work(dev_priv);
1720
 
1721
//	work = kzalloc(sizeof *work, GFP_KERNEL);
1722
//	if (work == NULL) {
1723
//		dev_priv->display.enable_fbc(crtc, interval);
1724
//		return;
1725
//	}
1726
 
1727
//	work->crtc = crtc;
1728
//	work->fb = crtc->fb;
1729
//	work->interval = interval;
1730
//	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
1731
 
1732
//	dev_priv->fbc_work = work;
1733
 
1734
	DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
1735
 
1736
	/* Delay the actual enabling to let pageflipping cease and the
1737
	 * display to settle before starting the compression. Note that
1738
	 * this delay also serves a second purpose: it allows for a
1739
	 * vblank to pass after disabling the FBC before we attempt
1740
	 * to modify the control registers.
1741
	 *
1742
	 * A more complicated solution would involve tracking vblanks
1743
	 * following the termination of the page-flipping sequence
1744
	 * and indeed performing the enable as a co-routine and not
1745
	 * waiting synchronously upon the vblank.
1746
	 */
1747
//	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
1748
}
1749
 
1750
void intel_disable_fbc(struct drm_device *dev)
1751
{
1752
	struct drm_i915_private *dev_priv = dev->dev_private;
1753
 
1754
//   intel_cancel_fbc_work(dev_priv);
1755
 
1756
	if (!dev_priv->display.disable_fbc)
1757
		return;
1758
 
1759
	dev_priv->display.disable_fbc(dev);
1760
	dev_priv->cfb_plane = -1;
1761
}
1762
 
1763
/**
1764
 * intel_update_fbc - enable/disable FBC as needed
1765
 * @dev: the drm_device
1766
 *
1767
 * Set up the framebuffer compression hardware at mode set time.  We
1768
 * enable it if possible:
1769
 *   - plane A only (on pre-965)
1770
 *   - no pixel mulitply/line duplication
1771
 *   - no alpha buffer discard
1772
 *   - no dual wide
1773
 *   - framebuffer <= 2048 in width, 1536 in height
1774
 *
1775
 * We can't assume that any compression will take place (worst case),
1776
 * so the compressed buffer has to be the same size as the uncompressed
1777
 * one.  It also must reside (along with the line length buffer) in
1778
 * stolen memory.
1779
 *
1780
 * We need to enable/disable FBC on a global basis.
1781
 */
1782
static void intel_update_fbc(struct drm_device *dev)
1783
{
1784
	struct drm_i915_private *dev_priv = dev->dev_private;
1785
	struct drm_crtc *crtc = NULL, *tmp_crtc;
1786
	struct intel_crtc *intel_crtc;
1787
	struct drm_framebuffer *fb;
1788
	struct intel_framebuffer *intel_fb;
1789
	struct drm_i915_gem_object *obj;
1790
 
1791
	DRM_DEBUG_KMS("\n");
1792
 
1793
	if (!i915_powersave)
1794
		return;
1795
 
1796
	if (!I915_HAS_FBC(dev))
1797
		return;
1798
 
1799
	/*
1800
	 * If FBC is already on, we just have to verify that we can
1801
	 * keep it that way...
1802
	 * Need to disable if:
1803
	 *   - more than one pipe is active
1804
	 *   - changing FBC params (stride, fence, mode)
1805
	 *   - new fb is too large to fit in compressed buffer
1806
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
1807
	 */
1808
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
1809
		if (tmp_crtc->enabled && tmp_crtc->fb) {
1810
			if (crtc) {
1811
				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
1812
//				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
1813
				goto out_disable;
1814
			}
1815
			crtc = tmp_crtc;
1816
		}
1817
	}
1818
 
1819
	if (!crtc || crtc->fb == NULL) {
1820
		DRM_DEBUG_KMS("no output, disabling\n");
1821
//		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
1822
		goto out_disable;
1823
	}
1824
 
1825
	intel_crtc = to_intel_crtc(crtc);
1826
	fb = crtc->fb;
1827
	intel_fb = to_intel_framebuffer(fb);
1828
	obj = intel_fb->obj;
1829
 
1830
	if (!i915_enable_fbc) {
1831
		DRM_DEBUG_KMS("fbc disabled per module param (default off)\n");
1832
//		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
1833
		goto out_disable;
1834
	}
1835
	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
1836
		DRM_DEBUG_KMS("framebuffer too large, disabling "
1837
			      "compression\n");
1838
//		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1839
		goto out_disable;
1840
	}
1841
	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
1842
	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
1843
		DRM_DEBUG_KMS("mode incompatible with compression, "
1844
			      "disabling\n");
1845
//		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
1846
		goto out_disable;
1847
	}
1848
	if ((crtc->mode.hdisplay > 2048) ||
1849
	    (crtc->mode.vdisplay > 1536)) {
1850
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
1851
//		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
1852
		goto out_disable;
1853
	}
1854
	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
1855
		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
1856
//		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
1857
		goto out_disable;
1858
	}
1859
 
1860
	/* The use of a CPU fence is mandatory in order to detect writes
1861
	 * by the CPU to the scanout and trigger updates to the FBC.
1862
	 */
1863
//	if (obj->tiling_mode != I915_TILING_X ||
1864
//	    obj->fence_reg == I915_FENCE_REG_NONE) {
1865
//		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
1866
//		dev_priv->no_fbc_reason = FBC_NOT_TILED;
1867
//		goto out_disable;
1868
//	}
1869
 
1870
	/* If the kernel debugger is active, always disable compression */
1871
	if (in_dbg_master())
1872
		goto out_disable;
1873
 
1874
	/* If the scanout has not changed, don't modify the FBC settings.
1875
	 * Note that we make the fundamental assumption that the fb->obj
1876
	 * cannot be unpinned (and have its GTT offset and fence revoked)
1877
	 * without first being decoupled from the scanout and FBC disabled.
1878
	 */
1879
	if (dev_priv->cfb_plane == intel_crtc->plane &&
1880
	    dev_priv->cfb_fb == fb->base.id &&
1881
	    dev_priv->cfb_y == crtc->y)
1882
		return;
1883
 
1884
	if (intel_fbc_enabled(dev)) {
1885
		/* We update FBC along two paths, after changing fb/crtc
1886
		 * configuration (modeswitching) and after page-flipping
1887
		 * finishes. For the latter, we know that not only did
1888
		 * we disable the FBC at the start of the page-flip
1889
		 * sequence, but also more than one vblank has passed.
1890
		 *
1891
		 * For the former case of modeswitching, it is possible
1892
		 * to switch between two FBC valid configurations
1893
		 * instantaneously so we do need to disable the FBC
1894
		 * before we can modify its control registers. We also
1895
		 * have to wait for the next vblank for that to take
1896
		 * effect. However, since we delay enabling FBC we can
1897
		 * assume that a vblank has passed since disabling and
1898
		 * that we can safely alter the registers in the deferred
1899
		 * callback.
1900
		 *
1901
		 * In the scenario that we go from a valid to invalid
1902
		 * and then back to valid FBC configuration we have
1903
		 * no strict enforcement that a vblank occurred since
1904
		 * disabling the FBC. However, along all current pipe
1905
		 * disabling paths we do need to wait for a vblank at
1906
		 * some point. And we wait before enabling FBC anyway.
1907
		 */
1908
		DRM_DEBUG_KMS("disabling active FBC for update\n");
1909
		intel_disable_fbc(dev);
1910
	}
1911
 
1912
	intel_enable_fbc(crtc, 500);
1913
	return;
1914
 
1915
out_disable:
1916
	/* Multiple disables should be harmless */
1917
	if (intel_fbc_enabled(dev)) {
1918
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
1919
		intel_disable_fbc(dev);
1920
	}
1921
}
1922
 
1923
 
1924
 
1925
 
1926
 
1927
 
1928
 
1929
 
1930
 
1931
/* Program the primary display plane on gen2-gen4 style hardware:
 * pixel format, tiling flag (gen4+) and the scanout base address for
 * the (x, y) panning offset.
 * Returns 0 on success, -EINVAL for an unknown plane or pixel format.
 */
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                 int x, int y)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    struct intel_framebuffer *intel_fb;
    struct drm_i915_gem_object *obj;
    int plane = intel_crtc->plane;
    unsigned long Start, Offset;
    u32 dspcntr;
    u32 reg;

    /* Only planes A (0) and B (1) can be updated here. */
    switch (plane) {
    case 0:
    case 1:
        break;
    default:
        DRM_ERROR("Can't update plane %d in SAREA\n", plane);
        return -EINVAL;
    }

    intel_fb = to_intel_framebuffer(fb);
    obj = intel_fb->obj;

    reg = DSPCNTR(plane);
    dspcntr = I915_READ(reg);
    /* Mask out pixel format bits in case we change it */
    dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
    switch (fb->bits_per_pixel) {
    case 8:
        dspcntr |= DISPPLANE_8BPP;
        break;
    case 16:
        if (fb->depth == 15)
            dspcntr |= DISPPLANE_15_16BPP;
        else
            dspcntr |= DISPPLANE_16BPP;
        break;
    case 24:
    case 32:
        dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
        break;
    default:
        DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
        return -EINVAL;
    }
    /* Tiling control only exists in DSPCNTR on gen4 and later. */
    if (INTEL_INFO(dev)->gen >= 4) {
        if (obj->tiling_mode != I915_TILING_NONE)
            dspcntr |= DISPPLANE_TILED;
        else
            dspcntr &= ~DISPPLANE_TILED;
    }

    I915_WRITE(reg, dspcntr);

    /* Base of the BO in the GTT plus the byte offset of pixel (x, y). */
    Start = obj->gtt_offset;
    Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);

    DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
              Start, Offset, x, y, fb->pitch);
    I915_WRITE(DSPSTRIDE(plane), fb->pitch);
    if (INTEL_INFO(dev)->gen >= 4) {
        /* gen4+: surface base + tile offset registers; DSPADDR holds
         * the linear offset. */
        I915_WRITE(DSPSURF(plane), Start);
        I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
        I915_WRITE(DSPADDR(plane), Offset);
    } else
        I915_WRITE(DSPADDR(plane), Start + Offset);
    POSTING_READ(reg);

    return 0;
}
2003
 
2004
static int ironlake_update_plane(struct drm_crtc *crtc,
2005
                 struct drm_framebuffer *fb, int x, int y)
2006
{
2007
    struct drm_device *dev = crtc->dev;
2008
    struct drm_i915_private *dev_priv = dev->dev_private;
2009
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2010
    struct intel_framebuffer *intel_fb;
2011
    struct drm_i915_gem_object *obj;
2012
    int plane = intel_crtc->plane;
2013
    unsigned long Start, Offset;
2014
    u32 dspcntr;
2015
    u32 reg;
2016
 
2017
    switch (plane) {
2018
    case 0:
2019
    case 1:
2020
        break;
2021
    default:
2022
        DRM_ERROR("Can't update plane %d in SAREA\n", plane);
2023
        return -EINVAL;
2024
    }
2025
 
2026
    intel_fb = to_intel_framebuffer(fb);
2027
    obj = intel_fb->obj;
2028
 
2029
    reg = DSPCNTR(plane);
2030
    dspcntr = I915_READ(reg);
2031
    /* Mask out pixel format bits in case we change it */
2032
    dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2033
    switch (fb->bits_per_pixel) {
2034
    case 8:
2035
        dspcntr |= DISPPLANE_8BPP;
2036
        break;
2037
    case 16:
2038
        if (fb->depth != 16)
2039
            return -EINVAL;
2040
 
2041
        dspcntr |= DISPPLANE_16BPP;
2042
        break;
2043
    case 24:
2044
    case 32:
2045
        if (fb->depth == 24)
2046
            dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
2047
        else if (fb->depth == 30)
2048
            dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
2049
        else
2050
            return -EINVAL;
2051
        break;
2052
    default:
2053
        DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
2054
        return -EINVAL;
2055
    }
2056
 
2057
//    if (obj->tiling_mode != I915_TILING_NONE)
2058
//        dspcntr |= DISPPLANE_TILED;
2059
//    else
2060
        dspcntr &= ~DISPPLANE_TILED;
2061
 
2062
    /* must disable */
2063
    dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2064
 
2065
    I915_WRITE(reg, dspcntr);
2066
 
2067
//    Start = obj->gtt_offset;
2068
//    Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
2069
 
2070
    DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2071
              Start, Offset, x, y, fb->pitch);
2330 Serge 2072
	I915_WRITE(DSPSTRIDE(plane), fb->pitch);
2073
	I915_WRITE(DSPSURF(plane), Start);
2074
	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2075
	I915_WRITE(DSPADDR(plane), Offset);
2076
	POSTING_READ(reg);
2327 Serge 2077
 
2078
    return 0;
2079
}
2080
 
2081
/* Assume fb object is pinned & idle & fenced and just update base pointers */
2082
static int
2083
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2084
			   int x, int y, enum mode_set_atomic state)
2085
{
2086
	struct drm_device *dev = crtc->dev;
2087
	struct drm_i915_private *dev_priv = dev->dev_private;
2088
	int ret;
2089
 
2090
	ret = dev_priv->display.update_plane(crtc, fb, x, y);
2091
	if (ret)
2092
		return ret;
2093
 
2094
	intel_update_fbc(dev);
2095
	intel_increase_pllclock(crtc);
2096
 
2097
	return 0;
2098
}
2099
 
2100
static int
2101
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2102
		    struct drm_framebuffer *old_fb)
2103
{
2104
	struct drm_device *dev = crtc->dev;
2105
	struct drm_i915_master_private *master_priv;
2106
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2107
	int ret;
2108
 
2109
	/* no fb bound */
2110
	if (!crtc->fb) {
2111
		DRM_ERROR("No FB bound\n");
2112
		return 0;
2113
	}
2114
 
2115
	switch (intel_crtc->plane) {
2116
	case 0:
2117
	case 1:
2118
		break;
2119
	default:
2120
		DRM_ERROR("no plane for crtc\n");
2121
		return -EINVAL;
2122
	}
2123
 
2124
	mutex_lock(&dev->struct_mutex);
2125
//   ret = intel_pin_and_fence_fb_obj(dev,
2126
//                    to_intel_framebuffer(crtc->fb)->obj,
2127
//                    NULL);
2128
	if (ret != 0) {
2129
		mutex_unlock(&dev->struct_mutex);
2130
		DRM_ERROR("pin & fence failed\n");
2131
		return ret;
2132
	}
2133
 
2134
	if (old_fb) {
2135
		struct drm_i915_private *dev_priv = dev->dev_private;
2136
		struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
2137
 
2138
//		wait_event(dev_priv->pending_flip_queue,
2139
//			   atomic_read(&dev_priv->mm.wedged) ||
2140
//			   atomic_read(&obj->pending_flip) == 0);
2141
 
2142
		/* Big Hammer, we also need to ensure that any pending
2143
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2144
		 * current scanout is retired before unpinning the old
2145
		 * framebuffer.
2146
		 *
2147
		 * This should only fail upon a hung GPU, in which case we
2148
		 * can safely continue.
2149
		 */
2150
//       ret = i915_gem_object_finish_gpu(obj);
2151
		(void) ret;
2152
	}
2153
 
2154
	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
2155
					 LEAVE_ATOMIC_MODE_SET);
2156
	if (ret) {
2157
//       i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
2158
		mutex_unlock(&dev->struct_mutex);
2159
		DRM_ERROR("failed to update base address\n");
2160
		return ret;
2161
	}
2162
 
2163
	if (old_fb) {
2164
//       intel_wait_for_vblank(dev, intel_crtc->pipe);
2165
//       i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
2166
	}
2167
 
2168
	mutex_unlock(&dev->struct_mutex);
2330 Serge 2169
#if 0
2170
	if (!dev->primary->master)
2171
		return 0;
2327 Serge 2172
 
2330 Serge 2173
	master_priv = dev->primary->master->driver_priv;
2174
	if (!master_priv->sarea_priv)
2175
		return 0;
2327 Serge 2176
 
2330 Serge 2177
	if (intel_crtc->pipe) {
2178
		master_priv->sarea_priv->pipeB_x = x;
2179
		master_priv->sarea_priv->pipeB_y = y;
2180
	} else {
2181
		master_priv->sarea_priv->pipeA_x = x;
2182
		master_priv->sarea_priv->pipeA_y = y;
2183
	}
2184
#endif
2327 Serge 2185
	return 0;
2186
}
2187
 
2188
/* Select the eDP PLL (DP_A) frequency for the given link clock
 * (presumably in kHz, matching DP link rates — TODO confirm):
 * 160MHz for clocks below 200000, with the documented 160MHz
 * workaround register sequence, otherwise 270MHz.
 */
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (clock < 200000) {
		u32 temp;
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		/* workaround for 160Mhz:
		   1) program 0x4600c bits 15:0 = 0x8124
		   2) program 0x46010 bit 0 = 1
		   3) program 0x46034 bit 24 = 1
		   4) program 0x64000 bit 14 = 1
		   */
		temp = I915_READ(0x4600c);
		temp &= 0xffff0000;
		I915_WRITE(0x4600c, temp | 0x8124);

		temp = I915_READ(0x46010);
		I915_WRITE(0x46010, temp | 1);

		temp = I915_READ(0x46034);
		I915_WRITE(0x46034, temp | (1 << 24));
		/* NOTE(review): step 4 presumably corresponds to the DP_A
		 * (0x64000) write below — verify against the PRM. */
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
	}
	I915_WRITE(DP_A, dpa_ctl);

	/* Give the PLL time to settle after the frequency change. */
	POSTING_READ(DP_A);
	udelay(500);
}
2224
 
2225
/* Switch the FDI link out of the training patterns into normal
 * pixel-sending mode on both the CPU TX and PCH RX sides, enabling
 * enhanced framing, then wait one idle-pattern time.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		/* IVB uses a different field layout for the train bits. */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
2265
 
2266
static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
2267
{
2268
	struct drm_i915_private *dev_priv = dev->dev_private;
2269
	u32 flags = I915_READ(SOUTH_CHICKEN1);
2270
 
2271
	flags |= FDI_PHASE_SYNC_OVR(pipe);
2272
	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
2273
	flags |= FDI_PHASE_SYNC_EN(pipe);
2274
	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
2275
	POSTING_READ(SOUTH_CHICKEN1);
2276
}
2277
 
2278
/* The FDI link training functions for ILK/Ibexpeak.
 *
 * Runs the two-stage training handshake: transmit pattern 1 until the
 * RX reports bit lock, then pattern 2 until symbol lock, polling
 * FDI_RX_IIR.  Failures are only logged; the caller proceeds anyway.
 */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;
    u32 reg, temp, tries;

    /* FDI needs bits from pipe & plane first */
    assert_pipe_enabled(dev_priv, pipe);
    assert_plane_enabled(dev_priv, plane);

    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
       for train result */
    reg = FDI_RX_IMR(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_RX_SYMBOL_LOCK;
    temp &= ~FDI_RX_BIT_LOCK;
    I915_WRITE(reg, temp);
    I915_READ(reg);
    udelay(150);

    /* enable CPU FDI TX and PCH FDI RX */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~(7 << 19);
    /* lane count field derived from fdi_lanes */
    temp |= (intel_crtc->fdi_lanes - 1) << 19;
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_1;
    I915_WRITE(reg, temp | FDI_TX_ENABLE);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_1;
    I915_WRITE(reg, temp | FDI_RX_ENABLE);

    POSTING_READ(reg);
    udelay(150);

    /* Ironlake workaround, enable clock pointer after FDI enable*/
    if (HAS_PCH_IBX(dev)) {
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
               FDI_RX_PHASE_SYNC_POINTER_EN);
    }

    /* Poll up to five times for bit lock on pattern 1. */
    reg = FDI_RX_IIR(pipe);
    for (tries = 0; tries < 5; tries++) {
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if ((temp & FDI_RX_BIT_LOCK)) {
            DRM_DEBUG_KMS("FDI train 1 done.\n");
            /* write the bit back to acknowledge the event */
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
            break;
        }
    }
    if (tries == 5)
        DRM_ERROR("FDI train 1 fail!\n");

    /* Train 2 */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_2;
    I915_WRITE(reg, temp);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_2;
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    udelay(150);

    /* Poll up to five times for symbol lock on pattern 2. */
    reg = FDI_RX_IIR(pipe);
    for (tries = 0; tries < 5; tries++) {
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if (temp & FDI_RX_SYMBOL_LOCK) {
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
            DRM_DEBUG_KMS("FDI train 2 done.\n");
            break;
        }
    }
    if (tries == 5)
        DRM_ERROR("FDI train 2 fail!\n");

    DRM_DEBUG_KMS("FDI train done\n");

}
2374
 
2375
/* Voltage-swing / pre-emphasis settings stepped through by the SNB/IVB
 * FDI training loops below (indexed by the 4-iteration retry loops).
 */
static const int snb_b_fdi_train_param [] = {
    FDI_LINK_TRAIN_400MV_0DB_SNB_B,
    FDI_LINK_TRAIN_400MV_6DB_SNB_B,
    FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
    FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
2381
 
2382
/* The FDI link training functions for SNB/Cougarpoint.
 *
 * Same two-stage handshake as the ILK version, but each stage sweeps
 * the four voltage-swing/pre-emphasis settings in snb_b_fdi_train_param
 * until FDI_RX_IIR reports bit lock (pattern 1) / symbol lock
 * (pattern 2).  Failures are only logged.
 */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    u32 reg, temp, i;

    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
       for train result */
    reg = FDI_RX_IMR(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_RX_SYMBOL_LOCK;
    temp &= ~FDI_RX_BIT_LOCK;
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    udelay(150);

    /* enable CPU FDI TX and PCH FDI RX */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~(7 << 19);
    /* lane count field derived from fdi_lanes */
    temp |= (intel_crtc->fdi_lanes - 1) << 19;
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_1;
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
    /* SNB-B */
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
    I915_WRITE(reg, temp | FDI_TX_ENABLE);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    if (HAS_PCH_CPT(dev)) {
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
        temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
    } else {
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_1;
    }
    I915_WRITE(reg, temp | FDI_RX_ENABLE);

    POSTING_READ(reg);
    udelay(150);

    if (HAS_PCH_CPT(dev))
        cpt_phase_pointer_enable(dev, pipe);

    /* Sweep the voltage swing/pre-emphasis table until bit lock. */
    for (i = 0; i < 4; i++ ) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(500);

        reg = FDI_RX_IIR(pipe);
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if (temp & FDI_RX_BIT_LOCK) {
            /* write the bit back to acknowledge the event */
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
            DRM_DEBUG_KMS("FDI train 1 done.\n");
            break;
        }
    }
    if (i == 4)
        DRM_ERROR("FDI train 1 fail!\n");

    /* Train 2 */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_2;
    if (IS_GEN6(dev)) {
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        /* SNB-B */
        temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
    }
    I915_WRITE(reg, temp);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    if (HAS_PCH_CPT(dev)) {
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
        temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
    } else {
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_2;
    }
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    udelay(150);

    /* Same sweep again, now waiting for symbol lock. */
    for (i = 0; i < 4; i++ ) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(500);

        reg = FDI_RX_IIR(pipe);
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if (temp & FDI_RX_SYMBOL_LOCK) {
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
            DRM_DEBUG_KMS("FDI train 2 done.\n");
            break;
        }
    }
    if (i == 4)
        DRM_ERROR("FDI train 2 fail!\n");

    DRM_DEBUG_KMS("FDI train done.\n");
}
2505
 
2506
/* Manual link training for Ivy Bridge A0 parts.
 *
 * Same structure as gen6_fdi_link_train but with the IVB-specific
 * train-pattern encoding on the TX side and CPT encoding on the RX
 * side; auto-train is explicitly disabled.
 */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    u32 reg, temp, i;

    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
       for train result */
    reg = FDI_RX_IMR(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_RX_SYMBOL_LOCK;
    temp &= ~FDI_RX_BIT_LOCK;
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    udelay(150);

    /* enable CPU FDI TX and PCH FDI RX */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~(7 << 19);
    /* lane count field derived from fdi_lanes */
    temp |= (intel_crtc->fdi_lanes - 1) << 19;
    temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
    temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
    I915_WRITE(reg, temp | FDI_TX_ENABLE);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_AUTO;
    temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
    temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
    I915_WRITE(reg, temp | FDI_RX_ENABLE);

    POSTING_READ(reg);
    udelay(150);

    if (HAS_PCH_CPT(dev))
        cpt_phase_pointer_enable(dev, pipe);

    /* Sweep the voltage swing/pre-emphasis table until bit lock;
     * IIR is sampled twice per iteration to catch a late lock. */
    for (i = 0; i < 4; i++ ) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(500);

        reg = FDI_RX_IIR(pipe);
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if (temp & FDI_RX_BIT_LOCK ||
            (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
            DRM_DEBUG_KMS("FDI train 1 done.\n");
            break;
        }
    }
    if (i == 4)
        DRM_ERROR("FDI train 1 fail!\n");

    /* Train 2 */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE_IVB;
    temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
    I915_WRITE(reg, temp);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
    temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    udelay(150);

    /* Same sweep again, now waiting for symbol lock. */
    for (i = 0; i < 4; i++ ) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(500);

        reg = FDI_RX_IIR(pipe);
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if (temp & FDI_RX_SYMBOL_LOCK) {
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
            DRM_DEBUG_KMS("FDI train 2 done.\n");
            break;
        }
    }
    if (i == 4)
        DRM_ERROR("FDI train 2 fail!\n");

    DRM_DEBUG_KMS("FDI train done.\n");
}
2617
 
2618
/* Enable the PCH FDI RX PLL and the (always-on) CPU FDI TX PLL for
 * this crtc's pipe, then switch the RX unit from Rawclk to PCDclk.
 */
static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* Write the TU size bits so error detection works */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~((0x7 << 19) | (0x7 << 16));
	/* lane count field derived from fdi_lanes */
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	/* mirror the pipe's BPC setting into the FDI RX control */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
2658
 
2659
static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
2660
{
2661
	struct drm_i915_private *dev_priv = dev->dev_private;
2662
	u32 flags = I915_READ(SOUTH_CHICKEN1);
2663
 
2664
	flags &= ~(FDI_PHASE_SYNC_EN(pipe));
2665
	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
2666
	flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
2667
	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
2668
	POSTING_READ(SOUTH_CHICKEN1);
2669
}
2670
static void ironlake_fdi_disable(struct drm_crtc *crtc)
2671
{
2672
	struct drm_device *dev = crtc->dev;
2673
	struct drm_i915_private *dev_priv = dev->dev_private;
2674
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2675
	int pipe = intel_crtc->pipe;
2676
	u32 reg, temp;
2677
 
2678
	/* disable CPU FDI tx and PCH FDI rx */
2679
	reg = FDI_TX_CTL(pipe);
2680
	temp = I915_READ(reg);
2681
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2682
	POSTING_READ(reg);
2683
 
2684
	reg = FDI_RX_CTL(pipe);
2685
	temp = I915_READ(reg);
2686
	temp &= ~(0x7 << 16);
2687
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2688
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2689
 
2690
	POSTING_READ(reg);
2691
	udelay(100);
2692
 
2693
	/* Ironlake workaround, disable clock pointer after downing FDI */
2694
	if (HAS_PCH_IBX(dev)) {
2695
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2696
		I915_WRITE(FDI_RX_CHICKEN(pipe),
2697
			   I915_READ(FDI_RX_CHICKEN(pipe) &
2698
				     ~FDI_RX_PHASE_SYNC_POINTER_EN));
2699
	} else if (HAS_PCH_CPT(dev)) {
2700
		cpt_phase_pointer_disable(dev, pipe);
2701
	}
2702
 
2703
	/* still set train pattern 1 */
2704
	reg = FDI_TX_CTL(pipe);
2705
	temp = I915_READ(reg);
2706
	temp &= ~FDI_LINK_TRAIN_NONE;
2707
	temp |= FDI_LINK_TRAIN_PATTERN_1;
2708
	I915_WRITE(reg, temp);
2709
 
2710
	reg = FDI_RX_CTL(pipe);
2711
	temp = I915_READ(reg);
2712
	if (HAS_PCH_CPT(dev)) {
2713
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2714
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2715
	} else {
2716
		temp &= ~FDI_LINK_TRAIN_NONE;
2717
		temp |= FDI_LINK_TRAIN_PATTERN_1;
2718
	}
2719
	/* BPC in FDI rx is consistent with that in PIPECONF */
2720
	temp &= ~(0x07 << 16);
2721
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2722
	I915_WRITE(reg, temp);
2723
 
2724
	POSTING_READ(reg);
2725
	udelay(100);
2726
}
2727
 
2728
/*
2729
 * When we disable a pipe, we need to clear any pending scanline wait events
2730
 * to avoid hanging the ring, which we assume we are waiting on.
2731
 */
2732
static void intel_clear_scanline_wait(struct drm_device *dev)
2733
{
2734
	struct drm_i915_private *dev_priv = dev->dev_private;
2735
	struct intel_ring_buffer *ring;
2736
	u32 tmp;
2737
 
2738
	if (IS_GEN2(dev))
2739
		/* Can't break the hang on i8xx */
2740
		return;
2741
 
2742
	ring = LP_RING(dev_priv);
2743
	tmp = I915_READ_CTL(ring);
2744
	if (tmp & RING_WAIT)
2745
		I915_WRITE_CTL(ring, tmp);
2746
}
2747
 
2748
/* Wait until no page flips are outstanding on @crtc's framebuffer.
 * NOTE(port): the wait_event on pending_flip is commented out in this
 * port, so the function currently only resolves the object and
 * returns without blocking.
 */
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_i915_gem_object *obj;
	struct drm_i915_private *dev_priv;

	/* Nothing to wait on without a bound framebuffer. */
	if (crtc->fb == NULL)
		return;

	obj = to_intel_framebuffer(crtc->fb)->obj;
	dev_priv = crtc->dev->dev_private;
//	wait_event(dev_priv->pending_flip_queue,
//		   atomic_read(&obj->pending_flip) == 0);
}
2761
 
2762
static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2763
{
2764
	struct drm_device *dev = crtc->dev;
2765
	struct drm_mode_config *mode_config = &dev->mode_config;
2766
	struct intel_encoder *encoder;
2767
 
2768
	/*
2769
	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2770
	 * must be driven by its own crtc; no sharing is possible.
2771
	 */
2772
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
2773
		if (encoder->base.crtc != crtc)
2774
			continue;
2775
 
2776
		switch (encoder->type) {
2777
		case INTEL_OUTPUT_EDP:
2778
			if (!intel_encoder_is_pch_edp(&encoder->base))
2779
				return false;
2780
			continue;
2781
		}
2782
	}
2783
 
2784
	return true;
2785
}
2786
 
2787
/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	intel_enable_pch_pll(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		/* Be sure PCH DPLL SEL is set */
		temp = I915_READ(PCH_DPLL_SEL);
		if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0)
			temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
		else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0)
			temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	/* Copy the CPU pipe's timings into the PCH transcoder. */
	I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
	I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
	I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));

	I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
	I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) &&
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= (TRANS_DP_OUTPUT_ENABLE |
			 TRANS_DP_ENH_FRAMING);
		temp |= bpc << 9; /* same format but at 11:9 */

		/* Propagate the mode's sync polarities. */
		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* Route the transcoder to the DP port actually in use. */
		switch (intel_trans_dp_port_sel(crtc)) {
		case PCH_DP_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		}

		I915_WRITE(reg, temp);
	}

	intel_enable_transcoder(dev_priv, pipe);
}
2869
 
2870
/* Power up a CRTC on Ironlake+: watermarks, LVDS port, FDI PLL,
 * panel fitter, gamma LUT, pipe and plane, and — for PCH-routed
 * outputs — the PCH transcoder path.  No-op if already active.
 */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;
    u32 temp;
    bool is_pch_port;

    if (intel_crtc->active)
        return;

    intel_crtc->active = true;
    intel_update_watermarks(dev);

    /* Make sure the LVDS port is powered before touching the pipe. */
    if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
        temp = I915_READ(PCH_LVDS);
        if ((temp & LVDS_PORT_EN) == 0)
            I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
    }

    is_pch_port = intel_crtc_driving_pch(crtc);

    if (is_pch_port)
        ironlake_fdi_pll_enable(crtc);
    else
        ironlake_fdi_disable(crtc);

    /* Enable panel fitting for LVDS */
    if (dev_priv->pch_pf_size &&
        (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
        /* Force use of hard-coded filter coefficients
         * as some pre-programmed values are broken,
         * e.g. x201.
         */
        I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
        I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
        I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
    }

    /*
     * On ILK+ LUT must be loaded before the pipe is running but with
     * clocks enabled
     */
    intel_crtc_load_lut(crtc);

    intel_enable_pipe(dev_priv, pipe, is_pch_port);
    intel_enable_plane(dev_priv, plane, pipe);

    if (is_pch_port)
        ironlake_pch_enable(crtc);

    /* FBC state is protected by struct_mutex. */
    mutex_lock(&dev->struct_mutex);
    intel_update_fbc(dev);
    mutex_unlock(&dev->struct_mutex);

//    intel_crtc_update_cursor(crtc, true);
}
2929
 
2930
/*
 * Power down a CRTC on PCH-split (Ironlake+) hardware.
 *
 * Teardown mirrors the enable path in reverse: plane, pipe, panel fitter,
 * FDI, PCH ports/transcoder, then the PLLs and FDI clocks.  The register
 * write order is mandated by the hardware and must not be changed.
 */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;
    u32 reg, temp;

    /* Idempotent: nothing to do if the crtc is already off. */
    if (!intel_crtc->active)
        return;

    intel_crtc_wait_for_pending_flips(crtc);
//    drm_vblank_off(dev, pipe);
//    intel_crtc_update_cursor(crtc, false);

    intel_disable_plane(dev_priv, plane, pipe);

    /* FBC must be torn down before its source plane goes away. */
    if (dev_priv->cfb_plane == plane)
        intel_disable_fbc(dev);

    intel_disable_pipe(dev_priv, pipe);

    /* Disable PF */
    I915_WRITE(PF_CTL(pipe), 0);
    I915_WRITE(PF_WIN_SZ(pipe), 0);

    ironlake_fdi_disable(crtc);

    /* This is a horrible layering violation; we should be doing this in
     * the connector/encoder ->prepare instead, but we don't always have
     * enough information there about the config to know whether it will
     * actually be necessary or just cause undesired flicker.
     */
    intel_disable_pch_ports(dev_priv, pipe);

    intel_disable_transcoder(dev_priv, pipe);

    if (HAS_PCH_CPT(dev)) {
        /* disable TRANS_DP_CTL */
        reg = TRANS_DP_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
        temp |= TRANS_DP_PORT_SEL_NONE;
        I915_WRITE(reg, temp);

        /* disable DPLL_SEL */
        temp = I915_READ(PCH_DPLL_SEL);
        switch (pipe) {
        case 0:
            temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
            break;
        case 1:
            temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
            break;
        case 2:
            /* FIXME: manage transcoder PLLs? */
            temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
            break;
        default:
            BUG(); /* wtf */
        }
        I915_WRITE(PCH_DPLL_SEL, temp);
    }

    /* disable PCH DPLL */
    intel_disable_pch_pll(dev_priv, pipe);

    /* Switch from PCDclk to Rawclk */
    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    I915_WRITE(reg, temp & ~FDI_PCDCLK);

    /* Disable CPU FDI TX PLL */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

    /* POSTING_READ flushes the write before the settle delay. */
    POSTING_READ(reg);
    udelay(100);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

    /* Wait for the clocks to turn off. */
    POSTING_READ(reg);
    udelay(100);

    intel_crtc->active = false;
    intel_update_watermarks(dev);

    /* struct_mutex protects the FBC/scanline-wait state updates. */
    mutex_lock(&dev->struct_mutex);
    intel_update_fbc(dev);
    intel_clear_scanline_wait(dev);
    mutex_unlock(&dev->struct_mutex);
}
3027
 
3028
static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
3029
{
3030
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031
    int pipe = intel_crtc->pipe;
3032
    int plane = intel_crtc->plane;
3033
 
3034
    /* XXX: When our outputs are all unaware of DPMS modes other than off
3035
     * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3036
     */
3037
    switch (mode) {
3038
    case DRM_MODE_DPMS_ON:
3039
    case DRM_MODE_DPMS_STANDBY:
3040
    case DRM_MODE_DPMS_SUSPEND:
3041
        DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
3042
        ironlake_crtc_enable(crtc);
3043
        break;
3044
 
3045
    case DRM_MODE_DPMS_OFF:
3046
        DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
3047
        ironlake_crtc_disable(crtc);
3048
        break;
3049
    }
3050
}
3051
 
3052
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3053
{
3054
	if (!enable && intel_crtc->overlay) {
3055
		struct drm_device *dev = intel_crtc->base.dev;
3056
		struct drm_i915_private *dev_priv = dev->dev_private;
3057
 
3058
		mutex_lock(&dev->struct_mutex);
3059
		dev_priv->mm.interruptible = false;
3060
//       (void) intel_overlay_switch_off(intel_crtc->overlay);
3061
		dev_priv->mm.interruptible = true;
3062
		mutex_unlock(&dev->struct_mutex);
3063
	}
3064
 
3065
	/* Let userspace switch the overlay on again. In most cases userspace
3066
	 * has to recompute where to put it anyway.
3067
	 */
3068
}
3069
 
3070
/*
 * Power up a CRTC on pre-Ironlake (i9xx) hardware:
 * DPLL first, then pipe, then plane, then LUT/FBC/overlay.
 */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;

    /* Idempotent: nothing to do if already running. */
    if (intel_crtc->active)
        return;

    intel_crtc->active = true;
    intel_update_watermarks(dev);

    intel_enable_pll(dev_priv, pipe);
    /* i9xx has no PCH, hence pch_port = false. */
    intel_enable_pipe(dev_priv, pipe, false);
    intel_enable_plane(dev_priv, plane, pipe);

    intel_crtc_load_lut(crtc);
    intel_update_fbc(dev);

    /* Give the overlay scaler a chance to enable if it's on this pipe */
    intel_crtc_dpms_overlay(intel_crtc, true);
//    intel_crtc_update_cursor(crtc, true);
}
3095
 
3096
/*
 * Power down a CRTC on pre-Ironlake (i9xx) hardware:
 * flips/overlay/FBC first, then plane, pipe and finally the DPLL.
 */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;

    /* Idempotent: nothing to do if already off. */
    if (!intel_crtc->active)
        return;

    /* Give the overlay scaler a chance to disable if it's on this pipe */
    intel_crtc_wait_for_pending_flips(crtc);
//    drm_vblank_off(dev, pipe);
    intel_crtc_dpms_overlay(intel_crtc, false);
//    intel_crtc_update_cursor(crtc, false);

    /* FBC must be torn down before its source plane goes away. */
    if (dev_priv->cfb_plane == plane)
        intel_disable_fbc(dev);

    intel_disable_plane(dev_priv, plane, pipe);
    intel_disable_pipe(dev_priv, pipe);
    intel_disable_pll(dev_priv, pipe);

    intel_crtc->active = false;
    intel_update_fbc(dev);
    intel_update_watermarks(dev);
    intel_clear_scanline_wait(dev);
}
3125
 
3126
static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
3127
{
3128
    /* XXX: When our outputs are all unaware of DPMS modes other than off
3129
     * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3130
     */
3131
    switch (mode) {
3132
    case DRM_MODE_DPMS_ON:
3133
    case DRM_MODE_DPMS_STANDBY:
3134
    case DRM_MODE_DPMS_SUSPEND:
3135
        i9xx_crtc_enable(crtc);
3136
        break;
3137
    case DRM_MODE_DPMS_OFF:
3138
        i9xx_crtc_disable(crtc);
3139
        break;
3140
    }
3141
}
3142
 
2330 Serge 3143
/**
 * Sets the power management mode of the pipe and plane.
 *
 * Dispatches to the generation-specific dpms hook, then mirrors the new
 * pipe dimensions into the legacy SAREA so old DRI clients stay in sync.
 */
static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool enabled;

	/* Skip redundant transitions. */
	if (intel_crtc->dpms_mode == mode)
		return;

	intel_crtc->dpms_mode = mode;

	/* Generation-specific implementation (i9xx or ironlake). */
	dev_priv->display.dpms(crtc, mode);

	/* The rest only updates the legacy SAREA; without a master there is
	 * no SAREA to update. */
	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (!master_priv->sarea_priv)
		return;

	enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;

	/* SAREA only has slots for pipes A and B. */
	switch (pipe) {
	case 0:
		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
		master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
		break;
	case 1:
		master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
		master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
		break;
	default:
		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
		break;
	}
}
2327 Serge 3185
 
2330 Serge 3186
/*
 * Fully switch a crtc off via its helper dpms hook and drop the pin on
 * its framebuffer (the unpin itself is stubbed out in this port).
 */
static void intel_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	struct drm_device *dev = crtc->dev;

	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);

	if (crtc->fb) {
		mutex_lock(&dev->struct_mutex);
//		i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
		mutex_unlock(&dev->struct_mutex);
	}
}
2327 Serge 3199
 
2330 Serge 3200
/* Prepare for a mode set.
 *
 * Note we could be a lot smarter here.  We need to figure out which outputs
 * will be enabled, which disabled (in short, how the config will changes)
 * and perform the minimum necessary steps to accomplish that, e.g. updating
 * watermarks, FBC configuration, making sure PLLs are programmed correctly,
 * panel fitting is in the proper state, etc.
 */
static void i9xx_crtc_prepare(struct drm_crtc *crtc)
{
	/* Brute force: turn the crtc fully off before reprogramming it. */
	i9xx_crtc_disable(crtc);
}
2327 Serge 3212
 
2330 Serge 3213
/* Finish a mode set: bring the freshly programmed crtc back up. */
static void i9xx_crtc_commit(struct drm_crtc *crtc)
{
	i9xx_crtc_enable(crtc);
}
2327 Serge 3217
 
2330 Serge 3218
/* Prepare for a mode set on PCH-split hardware: turn the crtc fully off. */
static void ironlake_crtc_prepare(struct drm_crtc *crtc)
{
	ironlake_crtc_disable(crtc);
}
2327 Serge 3222
 
2330 Serge 3223
/* Finish a mode set on PCH-split hardware: bring the crtc back up. */
static void ironlake_crtc_commit(struct drm_crtc *crtc)
{
	ironlake_crtc_enable(crtc);
}
2327 Serge 3227
 
2330 Serge 3228
/* Generic encoder ->prepare helper: power the encoder down before a
 * mode set via its dpms hook. */
void intel_encoder_prepare (struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	/* lvds has its own version of prepare see intel_lvds_prepare */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
2327 Serge 3234
 
2330 Serge 3235
/* Generic encoder ->commit helper: power the encoder back up after a
 * mode set via its dpms hook. */
void intel_encoder_commit (struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	/* lvds has its own version of commit see intel_lvds_commit */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}
2327 Serge 3241
 
2330 Serge 3242
/* Tear down an intel_encoder: unregister the DRM encoder core object,
 * then free the containing structure (which owns the drm_encoder). */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
3249
 
3250
static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3251
				  struct drm_display_mode *mode,
3252
				  struct drm_display_mode *adjusted_mode)
3253
{
3254
	struct drm_device *dev = crtc->dev;
3255
 
3256
	if (HAS_PCH_SPLIT(dev)) {
3257
		/* FDI link clock is fixed at 2.7G */
3258
		if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
3259
			return false;
3260
	}
3261
 
3262
	/* XXX some encoders set the crtcinfo, others don't.
3263
	 * Obviously we need some form of conflict resolution here...
3264
	 */
3265
	if (adjusted_mode->crtc_htotal == 0)
3266
		drm_mode_set_crtcinfo(adjusted_mode, 0);
3267
 
3268
	return true;
3269
}
3270
 
2327 Serge 3271
/* Core display clock for i945-class chips, in kHz (fixed at 400 MHz). */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}
3275
 
3276
/* Core display clock for i915-class chips, in kHz (fixed at 333 MHz). */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333000;
}
3280
 
3281
/* Core display clock for remaining i9xx variants, in kHz (200 MHz). */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}
3285
 
3286
/*
 * Core display clock for i915GM, in kHz.  Read from the GCFGC PCI config
 * register: a low-frequency bit forces 133 MHz, otherwise the clock field
 * selects 333 or 190/200 MHz.
 */
static int i915gm_get_display_clock_speed(struct drm_device *dev)
{
	u16 gcfgc = 0;

	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
		return 133000;
	else {
		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
		case GC_DISPLAY_CLOCK_333_MHZ:
			return 333000;
		default:
		/* Unknown encodings are treated like 190/200 MHz. */
		case GC_DISPLAY_CLOCK_190_200_MHZ:
			return 190000;
		}
	}
}
3304
 
3305
/* Core display clock for i865, in kHz (fixed at 266 MHz). */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266000;
}
3309
 
3310
/*
 * Core display clock for i855, in kHz, decoded from the HPLL clock-control
 * field.  hpllcc is deliberately left at 0 rather than read from hardware
 * (the high-speed state is assumed), so the switch resolves to whichever
 * GC_CLOCK_* encoding equals zero.
 */
static int i855_get_display_clock_speed(struct drm_device *dev)
{
	u16 hpllcc = 0;
	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133000;
	}

	/* Shouldn't happen */
	return 0;
}
3329
 
3330
/* Core display clock for i830, in kHz (fixed at 133 MHz). */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133000;
}
3334
 
3335
/* M/N divider values programmed into the FDI data and link registers. */
struct fdi_m_n {
    u32        tu;       /* transfer unit size (default 64) */
    u32        gmch_m;   /* data M: bits-per-pixel * pixel clock */
    u32        gmch_n;   /* data N: link clock * lanes * 8 */
    u32        link_m;   /* link M: pixel clock */
    u32        link_n;   /* link N: link clock */
};
3342
 
3343
static void
3344
fdi_reduce_ratio(u32 *num, u32 *den)
3345
{
3346
	while (*num > 0xffffff || *den > 0xffffff) {
3347
		*num >>= 1;
3348
		*den >>= 1;
3349
	}
3350
}
3351
 
3352
/*
 * Compute the FDI M/N divider pairs for the given pixel format, lane
 * count and clocks, reducing each ratio to fit the 24-bit registers.
 */
static void
ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
		     int link_clock, struct fdi_m_n *m_n)
{
	m_n->tu = 64; /* default size */

	/* BUG_ON(pixel_clock > INT_MAX / 36); */
	/* Data M/N: payload bits vs raw link bandwidth (lanes * 8 bit). */
	m_n->gmch_m = bits_per_pixel * pixel_clock;
	m_n->gmch_n = link_clock * nlanes * 8;
	fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);

	/* Link M/N: pixel clock vs link clock. */
	m_n->link_m = pixel_clock;
	m_n->link_n = link_clock;
	fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
3367
 
3368
 
3369
/* Per-platform FIFO/watermark tuning constants consumed by
 * intel_calculate_wm() and the *_update_wm() routines. */
struct intel_watermark_params {
    unsigned long fifo_size;       /* total FIFO size, in cachelines */
    unsigned long max_wm;          /* largest programmable watermark */
    unsigned long default_wm;      /* fallback when the computed value is bad */
    unsigned long guard_size;      /* safety margin, in cachelines */
    unsigned long cacheline_size;  /* FIFO line size, in bytes */
};
3376
 
3377
/* Pineview has different values for various configs */
3378
static const struct intel_watermark_params pineview_display_wm = {
3379
    PINEVIEW_DISPLAY_FIFO,
3380
    PINEVIEW_MAX_WM,
3381
    PINEVIEW_DFT_WM,
3382
    PINEVIEW_GUARD_WM,
3383
    PINEVIEW_FIFO_LINE_SIZE
3384
};
3385
static const struct intel_watermark_params pineview_display_hplloff_wm = {
3386
    PINEVIEW_DISPLAY_FIFO,
3387
    PINEVIEW_MAX_WM,
3388
    PINEVIEW_DFT_HPLLOFF_WM,
3389
    PINEVIEW_GUARD_WM,
3390
    PINEVIEW_FIFO_LINE_SIZE
3391
};
3392
static const struct intel_watermark_params pineview_cursor_wm = {
3393
    PINEVIEW_CURSOR_FIFO,
3394
    PINEVIEW_CURSOR_MAX_WM,
3395
    PINEVIEW_CURSOR_DFT_WM,
3396
    PINEVIEW_CURSOR_GUARD_WM,
3397
    PINEVIEW_FIFO_LINE_SIZE,
3398
};
3399
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
3400
    PINEVIEW_CURSOR_FIFO,
3401
    PINEVIEW_CURSOR_MAX_WM,
3402
    PINEVIEW_CURSOR_DFT_WM,
3403
    PINEVIEW_CURSOR_GUARD_WM,
3404
    PINEVIEW_FIFO_LINE_SIZE
3405
};
3406
static const struct intel_watermark_params g4x_wm_info = {
3407
    G4X_FIFO_SIZE,
3408
    G4X_MAX_WM,
3409
    G4X_MAX_WM,
3410
    2,
3411
    G4X_FIFO_LINE_SIZE,
3412
};
3413
static const struct intel_watermark_params g4x_cursor_wm_info = {
3414
    I965_CURSOR_FIFO,
3415
    I965_CURSOR_MAX_WM,
3416
    I965_CURSOR_DFT_WM,
3417
    2,
3418
    G4X_FIFO_LINE_SIZE,
3419
};
3420
static const struct intel_watermark_params i965_cursor_wm_info = {
3421
    I965_CURSOR_FIFO,
3422
    I965_CURSOR_MAX_WM,
3423
    I965_CURSOR_DFT_WM,
3424
    2,
3425
    I915_FIFO_LINE_SIZE,
3426
};
3427
static const struct intel_watermark_params i945_wm_info = {
3428
    I945_FIFO_SIZE,
3429
    I915_MAX_WM,
3430
    1,
3431
    2,
3432
    I915_FIFO_LINE_SIZE
3433
};
3434
static const struct intel_watermark_params i915_wm_info = {
3435
    I915_FIFO_SIZE,
3436
    I915_MAX_WM,
3437
    1,
3438
    2,
3439
    I915_FIFO_LINE_SIZE
3440
};
3441
static const struct intel_watermark_params i855_wm_info = {
3442
    I855GM_FIFO_SIZE,
3443
    I915_MAX_WM,
3444
    1,
3445
    2,
3446
    I830_FIFO_LINE_SIZE
3447
};
3448
static const struct intel_watermark_params i830_wm_info = {
3449
    I830_FIFO_SIZE,
3450
    I915_MAX_WM,
3451
    1,
3452
    2,
3453
    I830_FIFO_LINE_SIZE
3454
};
3455
 
3456
static const struct intel_watermark_params ironlake_display_wm_info = {
3457
    ILK_DISPLAY_FIFO,
3458
    ILK_DISPLAY_MAXWM,
3459
    ILK_DISPLAY_DFTWM,
3460
    2,
3461
    ILK_FIFO_LINE_SIZE
3462
};
3463
static const struct intel_watermark_params ironlake_cursor_wm_info = {
3464
    ILK_CURSOR_FIFO,
3465
    ILK_CURSOR_MAXWM,
3466
    ILK_CURSOR_DFTWM,
3467
    2,
3468
    ILK_FIFO_LINE_SIZE
3469
};
3470
static const struct intel_watermark_params ironlake_display_srwm_info = {
3471
    ILK_DISPLAY_SR_FIFO,
3472
    ILK_DISPLAY_MAX_SRWM,
3473
    ILK_DISPLAY_DFT_SRWM,
3474
    2,
3475
    ILK_FIFO_LINE_SIZE
3476
};
3477
static const struct intel_watermark_params ironlake_cursor_srwm_info = {
3478
    ILK_CURSOR_SR_FIFO,
3479
    ILK_CURSOR_MAX_SRWM,
3480
    ILK_CURSOR_DFT_SRWM,
3481
    2,
3482
    ILK_FIFO_LINE_SIZE
3483
};
3484
 
3485
static const struct intel_watermark_params sandybridge_display_wm_info = {
3486
    SNB_DISPLAY_FIFO,
3487
    SNB_DISPLAY_MAXWM,
3488
    SNB_DISPLAY_DFTWM,
3489
    2,
3490
    SNB_FIFO_LINE_SIZE
3491
};
3492
static const struct intel_watermark_params sandybridge_cursor_wm_info = {
3493
    SNB_CURSOR_FIFO,
3494
    SNB_CURSOR_MAXWM,
3495
    SNB_CURSOR_DFTWM,
3496
    2,
3497
    SNB_FIFO_LINE_SIZE
3498
};
3499
static const struct intel_watermark_params sandybridge_display_srwm_info = {
3500
    SNB_DISPLAY_SR_FIFO,
3501
    SNB_DISPLAY_MAX_SRWM,
3502
    SNB_DISPLAY_DFT_SRWM,
3503
    2,
3504
    SNB_FIFO_LINE_SIZE
3505
};
3506
static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
3507
    SNB_CURSOR_SR_FIFO,
3508
    SNB_CURSOR_MAX_SRWM,
3509
    SNB_CURSOR_DFT_SRWM,
3510
    2,
3511
    SNB_FIFO_LINE_SIZE
3512
};
3513
 
3514
 
3515
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO available to this plane, in cachelines
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again).  Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size.  When it reaches the watermark level, it'll start
 * fetching FIFO line sized based chunks from memory until the FIFO fills
 * past the watermark point.  If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
                    const struct intel_watermark_params *wm,
                    int fifo_size,
                    int pixel_size,
                    unsigned long latency_ns)
{
    long entries_required, wm_size;

    /*
     * Note: we need to make sure we don't overflow for various clock &
     * latency values.
     * clocks go from a few thousand to several hundred thousand.
     * latency is usually a few thousand
     */
    entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
        1000;
    /* Convert bytes drained during the latency window to cachelines. */
    entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

    DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

    wm_size = fifo_size - (entries_required + wm->guard_size);

    DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

    /* Don't promote wm_size to unsigned... */
    if (wm_size > (long)wm->max_wm)
        wm_size = wm->max_wm;
    /* A non-positive result would underflow the register; fall back to
     * the platform default. */
    if (wm_size <= 0)
        wm_size = wm->default_wm;
    return wm_size;
}
3564
 
3565
/* One row of the CxSR (self-refresh) latency table, keyed by platform
 * variant and FSB/memory frequency pair. */
struct cxsr_latency {
    int is_desktop;                      /* 1 = desktop, 0 = mobile */
    int is_ddr3;                         /* 1 = DDR3, 0 = DDR2 */
    unsigned long fsb_freq;              /* FSB frequency, MHz */
    unsigned long mem_freq;              /* memory frequency, MHz */
    unsigned long display_sr;            /* display self-refresh latency, ns */
    unsigned long display_hpll_disable;  /* display latency with HPLL off, ns */
    unsigned long cursor_sr;             /* cursor self-refresh latency, ns */
    unsigned long cursor_hpll_disable;   /* cursor latency with HPLL off, ns */
};
3575
 
3576
/* CxSR latency data, grouped by (desktop/mobile, memory type) and FSB
 * speed; consumed via intel_get_cxsr_latency(). */
static const struct cxsr_latency cxsr_latency_table[] = {
    {1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
    {1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
    {1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
    {1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
    {1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

    {1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
    {1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
    {1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
    {1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
    {1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

    {1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
    {1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
    {1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
    {1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
    {1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

    {0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
    {0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
    {0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
    {0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
    {0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

    {0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
    {0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
    {0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
    {0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
    {0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

    {0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
    {0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
    {0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
    {0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
    {0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
3613
 
3614
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
3615
                             int is_ddr3,
3616
                             int fsb,
3617
                             int mem)
3618
{
3619
    const struct cxsr_latency *latency;
3620
    int i;
3621
 
3622
    if (fsb == 0 || mem == 0)
3623
        return NULL;
3624
 
3625
    for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
3626
        latency = &cxsr_latency_table[i];
3627
        if (is_desktop == latency->is_desktop &&
3628
            is_ddr3 == latency->is_ddr3 &&
3629
            fsb == latency->fsb_freq && mem == latency->mem_freq)
3630
            return latency;
3631
    }
3632
 
3633
    DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
3634
 
3635
    return NULL;
3636
}
3637
 
3638
/* Turn off Pineview self-refresh by clearing the enable bit in DSPFW3. */
static void pineview_disable_cxsr(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    /* deactivate cxsr */
    I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}
3645
 
3646
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
/* Default memory latency fed into the watermark computations below. */
static const int latency_ns = 5000;
3661
 
3662
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
3663
{
3664
	struct drm_i915_private *dev_priv = dev->dev_private;
3665
	uint32_t dsparb = I915_READ(DSPARB);
3666
	int size;
3667
 
3668
	size = dsparb & 0x7f;
3669
	if (plane)
3670
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
3671
 
3672
	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3673
		      plane ? "B" : "A", size);
3674
 
3675
	return size;
3676
}
3677
 
3678
/* i85x FIFO allocation for @plane: decode the 9-bit split points from
 * DSPARB and convert FIFO entries to cachelines (2 entries each). */
static int i85x_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
3694
 
3695
/* i845 FIFO allocation: single shared split from DSPARB, 4 entries per
 * cacheline; @plane is only used for the debug message. */
static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A",
		      size);

	return size;
}
3710
 
3711
/* i830 FIFO allocation: split from DSPARB, 2 entries per cacheline;
 * @plane is only used for the debug message. */
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
3725
 
3726
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
3727
{
3728
    struct drm_crtc *crtc, *enabled = NULL;
3729
 
3730
    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3731
        if (crtc->enabled && crtc->fb) {
3732
            if (enabled)
3733
                return NULL;
3734
            enabled = crtc;
3735
        }
3736
    }
3737
 
3738
    return enabled;
3739
}
3740
 
3741
/*
 * Program the Pineview self-refresh (CxSR) watermarks.
 *
 * CxSR is only enabled when exactly one crtc is active and the platform's
 * FSB/memory frequencies are present in the latency table; otherwise it is
 * disabled.  Four watermarks are written: display/cursor self-refresh and
 * their HPLL-off variants.
 */
static void pineview_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		int clock = crtc->mode.clock;
		int pixel_size = crtc->fb->bits_per_pixel / 8;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		/* NOTE(review): the display FIFO size is deliberately used
		 * for the cursor watermark here, matching the cursor params'
		 * tuning — confirm against the hardware docs if changed. */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}
3809
 
3810
/*
 * Compute the level-0 (normal operation) plane and cursor watermarks for
 * @plane on G4X-class hardware.
 *
 * Returns false (and stores the guard-size defaults) when the crtc isn't
 * active, true when real values were computed.  The plane uses the small
 * buffer method (drain during latency window), the cursor the large
 * buffer method (lines fetched during the latency window).
 */
static bool g4x_compute_wm0(struct drm_device *dev,
                int plane,
                const struct intel_watermark_params *display,
                int display_latency_ns,
                const struct intel_watermark_params *cursor,
                int cursor_latency_ns,
                int *plane_wm,
                int *cursor_wm)
{
    struct drm_crtc *crtc;
    int htotal, hdisplay, clock, pixel_size;
    int line_time_us, line_count;
    int entries, tlb_miss;

    crtc = intel_get_crtc_for_plane(dev, plane);
    if (crtc->fb == NULL || !crtc->enabled) {
        /* Inactive pipe: fall back to the minimal guard values. */
        *cursor_wm = cursor->guard_size;
        *plane_wm = display->guard_size;
        return false;
    }

    htotal = crtc->mode.htotal;
    hdisplay = crtc->mode.hdisplay;
    clock = crtc->mode.clock;
    pixel_size = crtc->fb->bits_per_pixel / 8;

    /* Use the small buffer method to calculate plane watermark */
    entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
    /* Pad for a potential TLB miss when the FIFO outsizes one scanline. */
    tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
    if (tlb_miss > 0)
        entries += tlb_miss;
    entries = DIV_ROUND_UP(entries, display->cacheline_size);
    *plane_wm = entries + display->guard_size;
    if (*plane_wm > (int)display->max_wm)
        *plane_wm = display->max_wm;

    /* Use the large buffer method to calculate cursor watermark */
    line_time_us = ((htotal * 1000) / clock);
    line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
    /* 64 pixels is the fixed cursor width on this hardware generation. */
    entries = line_count * 64 * pixel_size;
    tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
    if (tlb_miss > 0)
        entries += tlb_miss;
    entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
    *cursor_wm = entries + cursor->guard_size;
    if (*cursor_wm > (int)cursor->max_wm)
        *cursor_wm = (int)cursor->max_wm;

    return true;
}
3860
 
3861
/*
3862
 * Check the wm result.
3863
 *
3864
 * If any calculated watermark values is larger than the maximum value that
3865
 * can be programmed into the associated watermark register, that watermark
3866
 * must be disabled.
3867
 */
3868
static bool g4x_check_srwm(struct drm_device *dev,
3869
			   int display_wm, int cursor_wm,
3870
			   const struct intel_watermark_params *display,
3871
			   const struct intel_watermark_params *cursor)
3872
{
3873
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
3874
		      display_wm, cursor_wm);
3875
 
3876
	if (display_wm > display->max_wm) {
3877
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
3878
			      display_wm, display->max_wm);
3879
		return false;
3880
	}
3881
 
3882
	if (cursor_wm > cursor->max_wm) {
3883
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
3884
			      cursor_wm, cursor->max_wm);
3885
		return false;
3886
	}
3887
 
3888
	if (!(display_wm || cursor_wm)) {
3889
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
3890
		return false;
3891
	}
3892
 
3893
	return true;
3894
}
3895
 
3896
static bool g4x_compute_srwm(struct drm_device *dev,
3897
			     int plane,
3898
			     int latency_ns,
3899
			     const struct intel_watermark_params *display,
3900
			     const struct intel_watermark_params *cursor,
3901
			     int *display_wm, int *cursor_wm)
3902
{
3903
	struct drm_crtc *crtc;
3904
	int hdisplay, htotal, pixel_size, clock;
3905
	unsigned long line_time_us;
3906
	int line_count, line_size;
3907
	int small, large;
3908
	int entries;
3909
 
3910
	if (!latency_ns) {
3911
		*display_wm = *cursor_wm = 0;
3912
		return false;
3913
	}
3914
 
3915
	crtc = intel_get_crtc_for_plane(dev, plane);
3916
	hdisplay = crtc->mode.hdisplay;
3917
	htotal = crtc->mode.htotal;
3918
	clock = crtc->mode.clock;
3919
	pixel_size = crtc->fb->bits_per_pixel / 8;
3920
 
3921
	line_time_us = (htotal * 1000) / clock;
3922
	line_count = (latency_ns / line_time_us + 1000) / 1000;
3923
	line_size = hdisplay * pixel_size;
3924
 
3925
	/* Use the minimum of the small and large buffer method for primary */
3926
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
3927
	large = line_count * line_size;
3928
 
3929
	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
3930
	*display_wm = entries + display->guard_size;
3931
 
3932
	/* calculate the self-refresh watermark for display cursor */
3933
	entries = line_count * pixel_size * 64;
3934
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
3935
	*cursor_wm = entries + cursor->guard_size;
3936
 
3937
	return g4x_check_srwm(dev,
3938
			      *display_wm, *cursor_wm,
3939
			      display, cursor);
3940
}
3941
 
3942
#define single_plane_enabled(mask) is_power_of_2(mask)
3943
 
3944
/*
 * Program FIFO watermarks on G4x: WM0 for each enabled pipe, plus the
 * self-refresh watermarks when exactly one pipe is active.
 */
static void g4x_update_wm(struct drm_device *dev)
{
	/* self-refresh has much higher latency than normal operation */
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;	/* bitmask of active pipes (bit0=A, bit1=B) */

	/* Normal (non-SR) watermarks for pipe A and pipe B */
	if (g4x_compute_wm0(dev, 0,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	/* Self-refresh is only usable with exactly one plane enabled */
	plane_sr = cursor_sr = 0;
	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	else
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
3994
 
3995
/*
 * Program FIFO watermarks on 965-class hardware.  The non-SR watermarks
 * are fixed at 8; only the self-refresh values are calculated, and only
 * when a single pipe is enabled.
 */
static void i965_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		int clock = crtc->mode.clock;
		int htotal = crtc->mode.htotal;
		int hdisplay = crtc->mode.hdisplay;
		int pixel_size = crtc->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = ((htotal * 1000) / clock);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		/* the SR field of DSPFW1 is 9 bits wide */
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		/* cursor is always 64 pixels wide */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * 64;
		entries = DIV_ROUND_UP(entries,
					  i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
4059
 
4060
/*
 * Program FIFO watermarks on gen2/gen3 (i8xx/i9xx) hardware.
 *
 * Self-refresh is disabled before the watermarks are touched and only
 * re-enabled afterwards when exactly one pipe is active.
 */
static void i9xx_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	/* 'enabled' ends up non-NULL only when exactly one pipe is active */
	struct drm_crtc *crtc, *enabled = NULL;

	/* Pick the watermark parameters for this chipset generation */
	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i855_wm_info;

	/* Plane A: real watermark if active, otherwise FIFO minus guard */
	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (crtc->enabled && crtc->fb) {
		planea_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		enabled = crtc;
	} else
		planea_wm = fifo_size - wm_info->guard_size;

	/* Plane B: same, and clear 'enabled' if both pipes are active */
	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (crtc->enabled && crtc->fb) {
		planeb_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	if (IS_I945G(dev) || IS_I945GM(dev))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
	else if (IS_I915GM(dev))
		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		int clock = enabled->mode.clock;
		int htotal = enabled->mode.htotal;
		int hdisplay = enabled->mode.hdisplay;
		int pixel_size = enabled->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = (htotal * 1000) / clock;

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	/* Re-enable self-refresh now that watermarks are in place */
	if (HAS_FW_BLC(dev)) {
		if (enabled) {
			if (IS_I945G(dev) || IS_I945GM(dev))
				I915_WRITE(FW_BLC_SELF,
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
			else if (IS_I915GM(dev))
				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
			DRM_DEBUG_KMS("memory self refresh enabled\n");
		} else
			DRM_DEBUG_KMS("memory self refresh disabled\n");
	}
}
4170
 
4171
static void i830_update_wm(struct drm_device *dev)
4172
{
4173
	struct drm_i915_private *dev_priv = dev->dev_private;
4174
	struct drm_crtc *crtc;
4175
	uint32_t fwater_lo;
4176
	int planea_wm;
4177
 
4178
	crtc = single_enabled_crtc(dev);
4179
	if (crtc == NULL)
4180
		return;
4181
 
4182
	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
4183
				       dev_priv->display.get_fifo_size(dev, 0),
4184
				       crtc->fb->bits_per_pixel / 8,
4185
				       latency_ns);
4186
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
4187
	fwater_lo |= (3<<8) | planea_wm;
4188
 
4189
	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
4190
 
4191
	I915_WRITE(FW_BLC, fwater_lo);
4192
}
4193
 
4194
#define ILK_LP0_PLANE_LATENCY		700
4195
#define ILK_LP0_CURSOR_LATENCY		1300
4196
 
4197
/*
4198
 * Check the wm result.
4199
 *
4200
 * If any calculated watermark values is larger than the maximum value that
4201
 * can be programmed into the associated watermark register, that watermark
4202
 * must be disabled.
4203
 */
4204
static bool ironlake_check_srwm(struct drm_device *dev, int level,
4205
				int fbc_wm, int display_wm, int cursor_wm,
4206
				const struct intel_watermark_params *display,
4207
				const struct intel_watermark_params *cursor)
4208
{
4209
	struct drm_i915_private *dev_priv = dev->dev_private;
4210
 
4211
	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
4212
		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
4213
 
4214
	if (fbc_wm > SNB_FBC_MAX_SRWM) {
4215
		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
4216
			      fbc_wm, SNB_FBC_MAX_SRWM, level);
4217
 
4218
		/* fbc has it's own way to disable FBC WM */
4219
		I915_WRITE(DISP_ARB_CTL,
4220
			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
4221
		return false;
4222
	}
4223
 
4224
	if (display_wm > display->max_wm) {
4225
		DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
4226
			      display_wm, SNB_DISPLAY_MAX_SRWM, level);
4227
		return false;
4228
	}
4229
 
4230
	if (cursor_wm > cursor->max_wm) {
4231
		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
4232
			      cursor_wm, SNB_CURSOR_MAX_SRWM, level);
4233
		return false;
4234
	}
4235
 
4236
	if (!(fbc_wm || display_wm || cursor_wm)) {
4237
		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
4238
		return false;
4239
	}
4240
 
4241
	return true;
4242
}
4243
 
4244
/*
4245
 * Compute watermark values of WM[1-3],
4246
 */
4247
static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
4248
                  int latency_ns,
4249
                  const struct intel_watermark_params *display,
4250
                  const struct intel_watermark_params *cursor,
4251
                  int *fbc_wm, int *display_wm, int *cursor_wm)
4252
{
4253
    struct drm_crtc *crtc;
4254
    unsigned long line_time_us;
4255
    int hdisplay, htotal, pixel_size, clock;
4256
    int line_count, line_size;
4257
    int small, large;
4258
    int entries;
4259
 
4260
    if (!latency_ns) {
4261
        *fbc_wm = *display_wm = *cursor_wm = 0;
4262
        return false;
4263
    }
4264
 
4265
    crtc = intel_get_crtc_for_plane(dev, plane);
4266
    hdisplay = crtc->mode.hdisplay;
4267
    htotal = crtc->mode.htotal;
4268
    clock = crtc->mode.clock;
4269
    pixel_size = crtc->fb->bits_per_pixel / 8;
4270
 
4271
    line_time_us = (htotal * 1000) / clock;
4272
    line_count = (latency_ns / line_time_us + 1000) / 1000;
4273
    line_size = hdisplay * pixel_size;
4274
 
4275
    /* Use the minimum of the small and large buffer method for primary */
4276
    small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4277
    large = line_count * line_size;
4278
 
4279
    entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
4280
    *display_wm = entries + display->guard_size;
4281
 
4282
    /*
4283
     * Spec says:
4284
     * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
4285
     */
4286
    *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
4287
 
4288
    /* calculate the self-refresh watermark for display cursor */
4289
    entries = line_count * pixel_size * 64;
4290
    entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4291
    *cursor_wm = entries + cursor->guard_size;
4292
 
4293
    return ironlake_check_srwm(dev, level,
4294
                   *fbc_wm, *display_wm, *cursor_wm,
4295
                   display, cursor);
4296
}
4297
 
4298
/*
 * Program watermarks on Ironlake: WM0 for each enabled pipe, then the
 * WM1/WM2 self-refresh levels when a single pipe is active.
 */
static void ironlake_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;	/* bitmask of active pipes (bit0=A, bit1=B) */

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEA_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEB_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled))
		return;
	/* convert the bitmask to a plane index */
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   ILK_READ_WM1_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   ILK_READ_WM2_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/*
	 * WM3 is unsupported on ILK, probably because we don't have latency
	 * data for that power state
	 */
}
4380
 
4381
/*
 * Program watermarks on Sandy Bridge: WM0 for each enabled pipe, then
 * the WM1/WM2/WM3 self-refresh levels when a single pipe is active.
 */
static void sandybridge_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;	/* bitmask of active pipes (bit0=A, bit1=B) */

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEA_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEB_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 *
	 * SNB support 3 levels of watermark.
	 *
	 * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
	 * and disabled in the descending order
	 *
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled))
		return;
	/* convert the bitmask to a plane index */
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   SNB_READ_WM1_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   SNB_READ_WM2_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM3 */
	if (!ironlake_compute_srwm(dev, 3, enabled,
				   SNB_READ_WM3_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM3_LP_ILK,
		   WM3_LP_EN |
		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);
}
4476
 
4477
/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values here).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that.  And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
4509
static void intel_update_watermarks(struct drm_device *dev)
4510
{
4511
	struct drm_i915_private *dev_priv = dev->dev_private;
4512
 
4513
	if (dev_priv->display.update_wm)
4514
		dev_priv->display.update_wm(dev);
4515
}
4516
 
4517
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4518
{
4519
	return dev_priv->lvds_use_ssc && i915_panel_use_ssc
4520
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4521
}
4522
 
4523
/**
 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
 * @crtc: CRTC structure
 *
 * A pipe may be connected to one or more outputs.  Based on the depth of the
 * attached framebuffer, choose a good color depth to use on the pipe.
 *
 * If possible, match the pipe depth to the fb depth.  In some cases, this
 * isn't ideal, because the connected output supports a lesser or restricted
 * set of depths.  Resolve that here:
 *    LVDS typically supports only 6bpc, so clamp down in that case
 *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
 *    Displays may support a restricted set as well, check EDID and clamp as
 *      appropriate.
 *
 * RETURNS:
 * Dithering requirement (i.e. false if display bpc and pipe bpc match,
 * true if they don't match).
 */
4542
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
					 unsigned int *pipe_bpp)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	/* display_bpc starts unbounded and is clamped by each output below */
	unsigned int display_bpc = UINT_MAX, bpc;

	/* Walk the encoders & connectors on this crtc, get min bpc */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		if (encoder->crtc != crtc)
			continue;

		if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
			unsigned int lvds_bpc;

			/* A3 power state of the LVDS port determines 8 vs 6 bpc */
			if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
			    LVDS_A3_POWER_UP)
				lvds_bpc = 8;
			else
				lvds_bpc = 6;

			if (lvds_bpc < display_bpc) {
				DRM_DEBUG_DRIVER("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
				display_bpc = lvds_bpc;
			}
			continue;
		}

		if (intel_encoder->type == INTEL_OUTPUT_EDP) {
			/* Use VBT settings if we have an eDP panel */
			unsigned int edp_bpc = dev_priv->edp.bpp / 3;

			if (edp_bpc < display_bpc) {
				DRM_DEBUG_DRIVER("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
				display_bpc = edp_bpc;
			}
			continue;
		}

		/* Not one of the known troublemakers, check the EDID */
		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    head) {
			if (connector->encoder != encoder)
				continue;

			/* Don't use an invalid EDID bpc value */
			if (connector->display_info.bpc &&
			    connector->display_info.bpc < display_bpc) {
				DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
				display_bpc = connector->display_info.bpc;
			}
		}

		/*
		 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
		 * through, clamp it down.  (Note: >12bpc will be caught below.)
		 */
		if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
			if (display_bpc > 8 && display_bpc < 12) {
				DRM_DEBUG_DRIVER("forcing bpc to 12 for HDMI\n");
				display_bpc = 12;
			} else {
				DRM_DEBUG_DRIVER("forcing bpc to 8 for HDMI\n");
				display_bpc = 8;
			}
		}
	}

	/*
	 * We could just drive the pipe at the highest bpc all the time and
	 * enable dithering as needed, but that costs bandwidth.  So choose
	 * the minimum value that expresses the full color range of the fb but
	 * also stays within the max display bpc discovered above.
	 */

	switch (crtc->fb->depth) {
	case 8:
		bpc = 8; /* since we go through a colormap */
		break;
	case 15:
	case 16:
		bpc = 6; /* min is 18bpp */
		break;
	case 24:
		bpc = min((unsigned int)8, display_bpc);
		break;
	case 30:
		bpc = min((unsigned int)10, display_bpc);
		break;
	case 48:
		bpc = min((unsigned int)12, display_bpc);
		break;
	default:
		DRM_DEBUG("unsupported depth, assuming 24 bits\n");
		bpc = min((unsigned int)8, display_bpc);
		break;
	}

	DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n",
			 bpc, display_bpc);

	/* pipe_bpp is bits per pixel = 3 components * bits per component */
	*pipe_bpp = bpc * 3;

	/* true means the caller must enable dithering */
	return display_bpc != bpc;
}
4651
 
4652
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4653
                  struct drm_display_mode *mode,
4654
                  struct drm_display_mode *adjusted_mode,
4655
                  int x, int y,
4656
                  struct drm_framebuffer *old_fb)
4657
{
4658
    struct drm_device *dev = crtc->dev;
4659
    struct drm_i915_private *dev_priv = dev->dev_private;
4660
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4661
    int pipe = intel_crtc->pipe;
4662
    int plane = intel_crtc->plane;
4663
    int refclk, num_connectors = 0;
4664
    intel_clock_t clock, reduced_clock;
4665
    u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
4666
    bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
4667
    bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
4668
    struct drm_mode_config *mode_config = &dev->mode_config;
4669
    struct intel_encoder *encoder;
4670
    const intel_limit_t *limit;
4671
    int ret;
4672
    u32 temp;
4673
    u32 lvds_sync = 0;
4674
 
4675
    list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4676
        if (encoder->base.crtc != crtc)
4677
            continue;
4678
 
4679
        switch (encoder->type) {
4680
        case INTEL_OUTPUT_LVDS:
4681
            is_lvds = true;
4682
            break;
4683
        case INTEL_OUTPUT_SDVO:
4684
        case INTEL_OUTPUT_HDMI:
4685
            is_sdvo = true;
4686
            if (encoder->needs_tv_clock)
4687
                is_tv = true;
4688
            break;
4689
        case INTEL_OUTPUT_DVO:
4690
            is_dvo = true;
4691
            break;
4692
        case INTEL_OUTPUT_TVOUT:
4693
            is_tv = true;
4694
            break;
4695
        case INTEL_OUTPUT_ANALOG:
4696
            is_crt = true;
4697
            break;
4698
        case INTEL_OUTPUT_DISPLAYPORT:
4699
            is_dp = true;
4700
            break;
4701
        }
4702
 
4703
        num_connectors++;
4704
    }
4705
 
4706
    if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4707
        refclk = dev_priv->lvds_ssc_freq * 1000;
4708
        DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
4709
                  refclk / 1000);
4710
    } else if (!IS_GEN2(dev)) {
4711
        refclk = 96000;
4712
    } else {
4713
        refclk = 48000;
4714
    }
4715
 
4716
    /*
4717
     * Returns a set of divisors for the desired target clock with the given
4718
     * refclk, or FALSE.  The returned values represent the clock equation:
4719
     * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
4720
     */
4721
    limit = intel_limit(crtc, refclk);
4722
    ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
4723
    if (!ok) {
4724
        DRM_ERROR("Couldn't find PLL settings for mode!\n");
4725
        return -EINVAL;
4726
    }
4727
 
4728
    /* Ensure that the cursor is valid for the new mode before changing... */
4729
//    intel_crtc_update_cursor(crtc, true);
4730
 
4731
    if (is_lvds && dev_priv->lvds_downclock_avail) {
4732
        has_reduced_clock = limit->find_pll(limit, crtc,
4733
                            dev_priv->lvds_downclock,
4734
                            refclk,
4735
                            &reduced_clock);
4736
        if (has_reduced_clock && (clock.p != reduced_clock.p)) {
4737
            /*
4738
             * If the different P is found, it means that we can't
4739
             * switch the display clock by using the FP0/FP1.
4740
             * In such case we will disable the LVDS downclock
4741
             * feature.
4742
             */
4743
            DRM_DEBUG_KMS("Different P is found for "
4744
                      "LVDS clock/downclock\n");
4745
            has_reduced_clock = 0;
4746
        }
4747
    }
4748
    /* SDVO TV has fixed PLL values depend on its clock range,
4749
       this mirrors vbios setting. */
4750
    if (is_sdvo && is_tv) {
4751
        if (adjusted_mode->clock >= 100000
4752
            && adjusted_mode->clock < 140500) {
4753
            clock.p1 = 2;
4754
            clock.p2 = 10;
4755
            clock.n = 3;
4756
            clock.m1 = 16;
4757
            clock.m2 = 8;
4758
        } else if (adjusted_mode->clock >= 140500
4759
               && adjusted_mode->clock <= 200000) {
4760
            clock.p1 = 1;
4761
            clock.p2 = 10;
4762
            clock.n = 6;
4763
            clock.m1 = 12;
4764
            clock.m2 = 8;
4765
        }
4766
    }
4767
 
4768
    if (IS_PINEVIEW(dev)) {
4769
        fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
4770
        if (has_reduced_clock)
4771
            fp2 = (1 << reduced_clock.n) << 16 |
4772
                reduced_clock.m1 << 8 | reduced_clock.m2;
4773
    } else {
4774
        fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
4775
        if (has_reduced_clock)
4776
            fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
4777
                reduced_clock.m2;
4778
    }
4779
 
4780
    dpll = DPLL_VGA_MODE_DIS;
4781
 
4782
    if (!IS_GEN2(dev)) {
4783
        if (is_lvds)
4784
            dpll |= DPLLB_MODE_LVDS;
4785
        else
4786
            dpll |= DPLLB_MODE_DAC_SERIAL;
4787
        if (is_sdvo) {
4788
            int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4789
            if (pixel_multiplier > 1) {
4790
                if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4791
                    dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
4792
            }
4793
            dpll |= DPLL_DVO_HIGH_SPEED;
4794
        }
4795
        if (is_dp)
4796
            dpll |= DPLL_DVO_HIGH_SPEED;
4797
 
4798
        /* compute bitmask from p1 value */
4799
        if (IS_PINEVIEW(dev))
4800
            dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
4801
        else {
4802
            dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4803
            if (IS_G4X(dev) && has_reduced_clock)
4804
                dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
4805
        }
4806
        switch (clock.p2) {
4807
        case 5:
4808
            dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
4809
            break;
4810
        case 7:
4811
            dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
4812
            break;
4813
        case 10:
4814
            dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
4815
            break;
4816
        case 14:
4817
            dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
4818
            break;
4819
        }
4820
        if (INTEL_INFO(dev)->gen >= 4)
4821
            dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
4822
    } else {
4823
        if (is_lvds) {
4824
            dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4825
        } else {
4826
            if (clock.p1 == 2)
4827
                dpll |= PLL_P1_DIVIDE_BY_TWO;
4828
            else
4829
                dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4830
            if (clock.p2 == 4)
4831
                dpll |= PLL_P2_DIVIDE_BY_4;
4832
        }
4833
    }
4834
 
4835
    if (is_sdvo && is_tv)
4836
        dpll |= PLL_REF_INPUT_TVCLKINBC;
4837
    else if (is_tv)
4838
        /* XXX: just matching BIOS for now */
4839
        /*  dpll |= PLL_REF_INPUT_TVCLKINBC; */
4840
        dpll |= 3;
4841
    else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4842
        dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4843
    else
4844
        dpll |= PLL_REF_INPUT_DREFCLK;
4845
 
4846
    /* setup pipeconf */
4847
    pipeconf = I915_READ(PIPECONF(pipe));
4848
 
4849
    /* Set up the display plane register */
4850
    dspcntr = DISPPLANE_GAMMA_ENABLE;
4851
 
4852
    /* Ironlake's plane is forced to pipe, bit 24 is to
4853
       enable color space conversion */
4854
    if (pipe == 0)
4855
        dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
4856
    else
4857
        dspcntr |= DISPPLANE_SEL_PIPE_B;
4858
 
4859
    if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
4860
        /* Enable pixel doubling when the dot clock is > 90% of the (display)
4861
         * core speed.
4862
         *
4863
         * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
4864
         * pipe == 0 check?
4865
         */
4866
        if (mode->clock >
4867
            dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
4868
            pipeconf |= PIPECONF_DOUBLE_WIDE;
4869
        else
4870
            pipeconf &= ~PIPECONF_DOUBLE_WIDE;
4871
    }
4872
 
4873
    dpll |= DPLL_VCO_ENABLE;
4874
 
4875
    DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
4876
    drm_mode_debug_printmodeline(mode);
4877
 
4878
    I915_WRITE(FP0(pipe), fp);
4879
    I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
4880
 
4881
    POSTING_READ(DPLL(pipe));
4882
    udelay(150);
4883
 
4884
    /* The LVDS pin pair needs to be on before the DPLLs are enabled.
4885
     * This is an exception to the general rule that mode_set doesn't turn
4886
     * things on.
4887
     */
4888
    if (is_lvds) {
4889
        temp = I915_READ(LVDS);
4890
        temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
4891
        if (pipe == 1) {
4892
            temp |= LVDS_PIPEB_SELECT;
4893
        } else {
4894
            temp &= ~LVDS_PIPEB_SELECT;
4895
        }
4896
        /* set the corresponsding LVDS_BORDER bit */
4897
        temp |= dev_priv->lvds_border_bits;
4898
        /* Set the B0-B3 data pairs corresponding to whether we're going to
4899
         * set the DPLLs for dual-channel mode or not.
4900
         */
4901
        if (clock.p2 == 7)
4902
            temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
4903
        else
4904
            temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
4905
 
4906
        /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
4907
         * appropriately here, but we need to look more thoroughly into how
4908
         * panels behave in the two modes.
4909
         */
4910
        /* set the dithering flag on LVDS as needed */
4911
        if (INTEL_INFO(dev)->gen >= 4) {
4912
            if (dev_priv->lvds_dither)
4913
                temp |= LVDS_ENABLE_DITHER;
4914
            else
4915
                temp &= ~LVDS_ENABLE_DITHER;
4916
        }
4917
        if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
4918
            lvds_sync |= LVDS_HSYNC_POLARITY;
4919
        if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
4920
            lvds_sync |= LVDS_VSYNC_POLARITY;
4921
        if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
4922
            != lvds_sync) {
4923
            char flags[2] = "-+";
4924
            DRM_INFO("Changing LVDS panel from "
4925
                 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
4926
                 flags[!(temp & LVDS_HSYNC_POLARITY)],
4927
                 flags[!(temp & LVDS_VSYNC_POLARITY)],
4928
                 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
4929
                 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
4930
            temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
4931
            temp |= lvds_sync;
4932
        }
4933
        I915_WRITE(LVDS, temp);
4934
    }
4935
 
4936
    if (is_dp) {
4937
        intel_dp_set_m_n(crtc, mode, adjusted_mode);
4938
    }
4939
 
4940
    I915_WRITE(DPLL(pipe), dpll);
4941
 
4942
    /* Wait for the clocks to stabilize. */
4943
    POSTING_READ(DPLL(pipe));
4944
    udelay(150);
4945
 
4946
    if (INTEL_INFO(dev)->gen >= 4) {
4947
        temp = 0;
4948
        if (is_sdvo) {
4949
            temp = intel_mode_get_pixel_multiplier(adjusted_mode);
4950
            if (temp > 1)
4951
                temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
4952
            else
4953
                temp = 0;
4954
        }
4955
        I915_WRITE(DPLL_MD(pipe), temp);
4956
    } else {
4957
        /* The pixel multiplier can only be updated once the
4958
         * DPLL is enabled and the clocks are stable.
4959
         *
4960
         * So write it again.
4961
         */
4962
        I915_WRITE(DPLL(pipe), dpll);
4963
    }
4964
 
4965
    intel_crtc->lowfreq_avail = false;
4966
    if (is_lvds && has_reduced_clock && i915_powersave) {
4967
        I915_WRITE(FP1(pipe), fp2);
4968
        intel_crtc->lowfreq_avail = true;
4969
        if (HAS_PIPE_CXSR(dev)) {
4970
            DRM_DEBUG_KMS("enabling CxSR downclocking\n");
4971
            pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
4972
        }
4973
    } else {
4974
        I915_WRITE(FP1(pipe), fp);
4975
        if (HAS_PIPE_CXSR(dev)) {
4976
            DRM_DEBUG_KMS("disabling CxSR downclocking\n");
4977
            pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
4978
        }
4979
    }
4980
 
4981
    if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
4982
        pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
4983
        /* the chip adds 2 halflines automatically */
4984
        adjusted_mode->crtc_vdisplay -= 1;
4985
        adjusted_mode->crtc_vtotal -= 1;
4986
        adjusted_mode->crtc_vblank_start -= 1;
4987
        adjusted_mode->crtc_vblank_end -= 1;
4988
        adjusted_mode->crtc_vsync_end -= 1;
4989
        adjusted_mode->crtc_vsync_start -= 1;
4990
    } else
4991
        pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
4992
 
4993
    I915_WRITE(HTOTAL(pipe),
4994
           (adjusted_mode->crtc_hdisplay - 1) |
4995
           ((adjusted_mode->crtc_htotal - 1) << 16));
4996
    I915_WRITE(HBLANK(pipe),
4997
           (adjusted_mode->crtc_hblank_start - 1) |
4998
           ((adjusted_mode->crtc_hblank_end - 1) << 16));
4999
    I915_WRITE(HSYNC(pipe),
5000
           (adjusted_mode->crtc_hsync_start - 1) |
5001
           ((adjusted_mode->crtc_hsync_end - 1) << 16));
5002
 
5003
    I915_WRITE(VTOTAL(pipe),
5004
           (adjusted_mode->crtc_vdisplay - 1) |
5005
           ((adjusted_mode->crtc_vtotal - 1) << 16));
5006
    I915_WRITE(VBLANK(pipe),
5007
           (adjusted_mode->crtc_vblank_start - 1) |
5008
           ((adjusted_mode->crtc_vblank_end - 1) << 16));
5009
    I915_WRITE(VSYNC(pipe),
5010
           (adjusted_mode->crtc_vsync_start - 1) |
5011
           ((adjusted_mode->crtc_vsync_end - 1) << 16));
5012
 
5013
    /* pipesrc and dspsize control the size that is scaled from,
5014
     * which should always be the user's requested size.
5015
     */
5016
    I915_WRITE(DSPSIZE(plane),
5017
           ((mode->vdisplay - 1) << 16) |
5018
           (mode->hdisplay - 1));
5019
    I915_WRITE(DSPPOS(plane), 0);
5020
    I915_WRITE(PIPESRC(pipe),
5021
           ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
5022
 
5023
    I915_WRITE(PIPECONF(pipe), pipeconf);
5024
    POSTING_READ(PIPECONF(pipe));
5025
    intel_enable_pipe(dev_priv, pipe, false);
5026
 
5027
    intel_wait_for_vblank(dev, pipe);
5028
 
5029
    I915_WRITE(DSPCNTR(plane), dspcntr);
5030
    POSTING_READ(DSPCNTR(plane));
5031
    intel_enable_plane(dev_priv, plane, pipe);
5032
 
5033
    ret = intel_pipe_set_base(crtc, x, y, old_fb);
5034
 
5035
    intel_update_watermarks(dev);
5036
 
5037
    return ret;
5038
}
5039
 
5040
/*
 * Program the PCH display reference clock (PCH_DREF_CONTROL) to match the
 * current global output configuration.
 *
 * Scans every enabled CRTC's encoders for LVDS/eDP outputs, then enables
 * the non-spread and SSC reference sources, and finally routes the CPU
 * source output (down-spread or non-spread, depending on the SSC panel
 * setting) for CPU-attached eDP, or enables the superspread source for
 * PCH-attached eDP when SSC is requested.
 */
static void ironlake_update_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_encoder *has_edp_encoder = NULL; /* last LVDS/eDP encoder found */
	u32 temp;
	bool has_lvds = false; /* NOTE(review): set below but never read here */

	/* We need to take the global config into account */
	list_for_each_entry(crtc, &mode_config->crtc_list, head) {
		if (!crtc->enabled)
			continue;

		list_for_each_entry(encoder, &mode_config->encoder_list,
				    base.head) {
			if (encoder->base.crtc != crtc)
				continue;

			switch (encoder->type) {
			case INTEL_OUTPUT_LVDS:
				has_lvds = true;
				/* NOTE(review): no break here — an LVDS
				 * encoder falls through and is also recorded
				 * in has_edp_encoder. Confirm this
				 * fallthrough is intentional. */
			case INTEL_OUTPUT_EDP:
				has_edp_encoder = encoder;
				break;
			}
		}
	}

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	temp = I915_READ(PCH_DREF_CONTROL);
	/* Always enable nonspread source */
	temp &= ~DREF_NONSPREAD_SOURCE_MASK;
	temp |= DREF_NONSPREAD_SOURCE_ENABLE;
	temp &= ~DREF_SSC_SOURCE_MASK;
	temp |= DREF_SSC_SOURCE_ENABLE;
	I915_WRITE(PCH_DREF_CONTROL, temp);

	/* Flush the write and let the reference source settle. */
	POSTING_READ(PCH_DREF_CONTROL);
	udelay(200);

	if (has_edp_encoder) {
		if (intel_panel_use_ssc(dev_priv)) {
			/* Turn on SSC1 first, and give it time to lock. */
			temp |= DREF_SSC1_ENABLE;
			I915_WRITE(PCH_DREF_CONTROL, temp);

			POSTING_READ(PCH_DREF_CONTROL);
			udelay(200);
		}
		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
			if (intel_panel_use_ssc(dev_priv))
				temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else {
			/* Enable SSC on PCH eDP if needed */
			if (intel_panel_use_ssc(dev_priv)) {
				DRM_ERROR("enabling SSC on PCH\n");
				temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
			}
		}
		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	}
}
5114
 
5115
/*
 * Mode-set implementation for PCH-split (Ironlake+) hardware.
 *
 * Computes PLL divisors for the requested mode, sizes the FDI link and its
 * M/N values for the chosen pipe bpp, programs the PCH DPLL, transcoder
 * PLL selection, LVDS port, pipe timings and PIPECONF, and finally flips
 * the framebuffer base via intel_pipe_set_base().
 *
 * @crtc:          CRTC being configured
 * @mode:          user-requested mode (pixel clock, hdisplay/vdisplay)
 * @adjusted_mode: mode after encoder fixup (actual timings to program)
 * @x, @y:         framebuffer panning offsets
 * @old_fb:        previously attached framebuffer, for the base update
 *
 * Returns 0 on success or -EINVAL if no PLL settings fit the mode;
 * otherwise propagates the result of intel_pipe_set_base().
 */
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
                  struct drm_display_mode *mode,
                  struct drm_display_mode *adjusted_mode,
                  int x, int y,
                  struct drm_framebuffer *old_fb)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;
    int refclk, num_connectors = 0;
    intel_clock_t clock, reduced_clock;
    u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
    bool ok, has_reduced_clock = false, is_sdvo = false;
    /* NOTE(review): is_crt is set below but never consulted in this path. */
    bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
    struct intel_encoder *has_edp_encoder = NULL;
    struct drm_mode_config *mode_config = &dev->mode_config;
    struct intel_encoder *encoder;
    const intel_limit_t *limit;
    int ret;
    struct fdi_m_n m_n = {0};
    u32 temp;
    u32 lvds_sync = 0;
    int target_clock, pixel_multiplier, lane, link_bw, factor;
    unsigned int pipe_bpp;
    bool dither;

    /* Classify all encoders currently driven by this CRTC; the flags
     * steer refclk choice, PLL programming and port setup below. */
    list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
        if (encoder->base.crtc != crtc)
            continue;

        switch (encoder->type) {
        case INTEL_OUTPUT_LVDS:
            is_lvds = true;
            break;
        case INTEL_OUTPUT_SDVO:
        case INTEL_OUTPUT_HDMI:
            is_sdvo = true;
            if (encoder->needs_tv_clock)
                is_tv = true;
            break;
        case INTEL_OUTPUT_TVOUT:
            is_tv = true;
            break;
        case INTEL_OUTPUT_ANALOG:
            is_crt = true;
            break;
        case INTEL_OUTPUT_DISPLAYPORT:
            is_dp = true;
            break;
        case INTEL_OUTPUT_EDP:
            has_edp_encoder = encoder;
            break;
        }

        num_connectors++;
    }

    /* Pick the reference clock: panel SSC for a lone LVDS output,
     * otherwise 120 MHz (PCH) or 96 MHz (CPU eDP). */
    if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
        refclk = dev_priv->lvds_ssc_freq * 1000;
        DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
                  refclk / 1000);
    } else {
        refclk = 96000;
        if (!has_edp_encoder ||
            intel_encoder_is_pch_edp(&has_edp_encoder->base))
            refclk = 120000; /* 120Mhz refclk */
    }

    /*
     * Returns a set of divisors for the desired target clock with the given
     * refclk, or FALSE.  The returned values represent the clock equation:
     * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
     */
    limit = intel_limit(crtc, refclk);
    ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
    if (!ok) {
        DRM_ERROR("Couldn't find PLL settings for mode!\n");
        return -EINVAL;
    }

    /* Ensure that the cursor is valid for the new mode before changing... */
//    intel_crtc_update_cursor(crtc, true);

    /* Try to find a second, slower divisor set for LVDS downclocking. */
    if (is_lvds && dev_priv->lvds_downclock_avail) {
        has_reduced_clock = limit->find_pll(limit, crtc,
                            dev_priv->lvds_downclock,
                            refclk,
                            &reduced_clock);
        if (has_reduced_clock && (clock.p != reduced_clock.p)) {
            /*
             * If the different P is found, it means that we can't
             * switch the display clock by using the FP0/FP1.
             * In such case we will disable the LVDS downclock
             * feature.
             */
            DRM_DEBUG_KMS("Different P is found for "
                      "LVDS clock/downclock\n");
            has_reduced_clock = 0;
        }
    }
    /* SDVO TV has fixed PLL values depend on its clock range,
       this mirrors vbios setting. */
    if (is_sdvo && is_tv) {
        if (adjusted_mode->clock >= 100000
            && adjusted_mode->clock < 140500) {
            clock.p1 = 2;
            clock.p2 = 10;
            clock.n = 3;
            clock.m1 = 16;
            clock.m2 = 8;
        } else if (adjusted_mode->clock >= 140500
               && adjusted_mode->clock <= 200000) {
            clock.p1 = 1;
            clock.p2 = 10;
            clock.n = 6;
            clock.m1 = 12;
            clock.m2 = 8;
        }
    }

    /* FDI link */
    pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
    lane = 0;
    /* CPU eDP doesn't require FDI link, so just set DP M/N
       according to current link config */
    if (has_edp_encoder &&
        !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
        target_clock = mode->clock;
        intel_edp_link_config(has_edp_encoder,
                      &lane, &link_bw);
    } else {
        /* [e]DP over FDI requires target mode clock
           instead of link clock */
        /* NOTE(review): has_edp_encoder may be NULL here; the second
         * operand dereferences it when is_dp is false. Later upstream
         * kernels guard this with a NULL check — confirm. */
        if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
            target_clock = mode->clock;
        else
            target_clock = adjusted_mode->clock;

        /* FDI is a binary signal running at ~2.7GHz, encoding
         * each output octet as 10 bits. The actual frequency
         * is stored as a divider into a 100MHz clock, and the
         * mode pixel clock is stored in units of 1KHz.
         * Hence the bw of each lane in terms of the mode signal
         * is:
         */
        link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
    }

    /* determine panel color depth */
    temp = I915_READ(PIPECONF(pipe));
    temp &= ~PIPE_BPC_MASK;
    dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp);
    switch (pipe_bpp) {
    case 18:
        temp |= PIPE_6BPC;
        break;
    case 24:
        temp |= PIPE_8BPC;
        break;
    case 30:
        temp |= PIPE_10BPC;
        break;
    case 36:
        temp |= PIPE_12BPC;
        break;
    default:
        /* Fall back to 8 bpc rather than failing the mode set. */
        WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
            pipe_bpp);
        temp |= PIPE_8BPC;
        pipe_bpp = 24;
        break;
    }

    intel_crtc->bpp = pipe_bpp;
    I915_WRITE(PIPECONF(pipe), temp);

    if (!lane) {
        /*
         * Account for spread spectrum to avoid
         * oversubscribing the link. Max center spread
         * is 2.5%; use 5% for safety's sake.
         */
        u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
        lane = bps / (link_bw * 8) + 1;
    }

    intel_crtc->fdi_lanes = lane;

    if (pixel_multiplier > 1)
        link_bw *= pixel_multiplier;
    ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
                 &m_n);

    /* Reference clock must be configured before the DPLL is enabled. */
    ironlake_update_pch_refclk(dev);

    /* FP0/FP1 hold the feedback divisors (n, m1, m2). */
    fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
    if (has_reduced_clock)
        fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
            reduced_clock.m2;

    /* Enable autotuning of the PLL clock (if permissible) */
    factor = 21;
    if (is_lvds) {
        if ((intel_panel_use_ssc(dev_priv) &&
             dev_priv->lvds_ssc_freq == 100) ||
            (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
            factor = 25;
    } else if (is_sdvo && is_tv)
        factor = 20;

    if (clock.m < factor * clock.n)
        fp |= FP_CB_TUNE;

    /* Assemble the DPLL control value: mode, multiplier, dividers, ref. */
    dpll = 0;

    if (is_lvds)
        dpll |= DPLLB_MODE_LVDS;
    else
        dpll |= DPLLB_MODE_DAC_SERIAL;
    if (is_sdvo) {
        int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
        if (pixel_multiplier > 1) {
            dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
        }
        dpll |= DPLL_DVO_HIGH_SPEED;
    }
    /* NOTE(review): same potential NULL has_edp_encoder dereference as
     * above when is_dp is false — confirm against upstream fix. */
    if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
        dpll |= DPLL_DVO_HIGH_SPEED;

    /* compute bitmask from p1 value */
    dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
    /* also FPA1 */
    dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

    switch (clock.p2) {
    case 5:
        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
        break;
    case 7:
        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
        break;
    case 10:
        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
        break;
    case 14:
        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
        break;
    }

    if (is_sdvo && is_tv)
        dpll |= PLL_REF_INPUT_TVCLKINBC;
    else if (is_tv)
        /* XXX: just matching BIOS for now */
        /*  dpll |= PLL_REF_INPUT_TVCLKINBC; */
        dpll |= 3;
    else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
        dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
    else
        dpll |= PLL_REF_INPUT_DREFCLK;

    /* setup pipeconf */
    pipeconf = I915_READ(PIPECONF(pipe));

    /* Set up the display plane register */
    dspcntr = DISPPLANE_GAMMA_ENABLE;

    DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
    drm_mode_debug_printmodeline(mode);

    /* PCH eDP needs FDI, but CPU eDP does not */
    if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
        I915_WRITE(PCH_FP0(pipe), fp);
        I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);

        POSTING_READ(PCH_DPLL(pipe));
        udelay(150);
    }

    /* enable transcoder DPLL */
    if (HAS_PCH_CPT(dev)) {
        temp = I915_READ(PCH_DPLL_SEL);
        switch (pipe) {
        case 0:
            temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL;
            break;
        case 1:
            temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL;
            break;
        case 2:
            /* FIXME: manage transcoder PLLs? */
            temp |= TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL;
            break;
        default:
            BUG();
        }
        I915_WRITE(PCH_DPLL_SEL, temp);

        POSTING_READ(PCH_DPLL_SEL);
        udelay(150);
    }

    /* The LVDS pin pair needs to be on before the DPLLs are enabled.
     * This is an exception to the general rule that mode_set doesn't turn
     * things on.
     */
    if (is_lvds) {
        temp = I915_READ(PCH_LVDS);
        temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
        if (pipe == 1) {
            if (HAS_PCH_CPT(dev))
                temp |= PORT_TRANS_B_SEL_CPT;
            else
                temp |= LVDS_PIPEB_SELECT;
        } else {
            if (HAS_PCH_CPT(dev))
                temp &= ~PORT_TRANS_SEL_MASK;
            else
                temp &= ~LVDS_PIPEB_SELECT;
        }
        /* set the corresponsding LVDS_BORDER bit */
        temp |= dev_priv->lvds_border_bits;
        /* Set the B0-B3 data pairs corresponding to whether we're going to
         * set the DPLLs for dual-channel mode or not.
         */
        if (clock.p2 == 7)
            temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
        else
            temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

        /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
         * appropriately here, but we need to look more thoroughly into how
         * panels behave in the two modes.
         */
        if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
            lvds_sync |= LVDS_HSYNC_POLARITY;
        if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
            lvds_sync |= LVDS_VSYNC_POLARITY;
        if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
            != lvds_sync) {
            /* Deliberately unterminated: only flags[0]/flags[1] are used. */
            char flags[2] = "-+";
            DRM_INFO("Changing LVDS panel from "
                 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
                 flags[!(temp & LVDS_HSYNC_POLARITY)],
                 flags[!(temp & LVDS_VSYNC_POLARITY)],
                 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
                 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
            temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
            temp |= lvds_sync;
        }
        I915_WRITE(PCH_LVDS, temp);
    }

    pipeconf &= ~PIPECONF_DITHER_EN;
    pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
    if ((is_lvds && dev_priv->lvds_dither) || dither) {
        pipeconf |= PIPECONF_DITHER_EN;
        pipeconf |= PIPECONF_DITHER_TYPE_ST1;
    }
    /* NOTE(review): possible NULL has_edp_encoder dereference again when
     * is_dp is false — see note at the FDI link setup above. */
    if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
        intel_dp_set_m_n(crtc, mode, adjusted_mode);
    } else {
        /* For non-DP output, clear any trans DP clock recovery setting.*/
        I915_WRITE(TRANSDATA_M1(pipe), 0);
        I915_WRITE(TRANSDATA_N1(pipe), 0);
        I915_WRITE(TRANSDPLINK_M1(pipe), 0);
        I915_WRITE(TRANSDPLINK_N1(pipe), 0);
    }

    if (!has_edp_encoder ||
        intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
        I915_WRITE(PCH_DPLL(pipe), dpll);

        /* Wait for the clocks to stabilize. */
        POSTING_READ(PCH_DPLL(pipe));
        udelay(150);

        /* The pixel multiplier can only be updated once the
         * DPLL is enabled and the clocks are stable.
         *
         * So write it again.
         */
        I915_WRITE(PCH_DPLL(pipe), dpll);
    }

    intel_crtc->lowfreq_avail = false;
    if (is_lvds && has_reduced_clock && i915_powersave) {
        /* Program the downclock divisors into FP1 for CxSR. */
        I915_WRITE(PCH_FP1(pipe), fp2);
        intel_crtc->lowfreq_avail = true;
        if (HAS_PIPE_CXSR(dev)) {
            DRM_DEBUG_KMS("enabling CxSR downclocking\n");
            pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
        }
    } else {
        I915_WRITE(PCH_FP1(pipe), fp);
        if (HAS_PIPE_CXSR(dev)) {
            DRM_DEBUG_KMS("disabling CxSR downclocking\n");
            pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
        }
    }

    if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
        pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
        /* the chip adds 2 halflines automatically */
        adjusted_mode->crtc_vdisplay -= 1;
        adjusted_mode->crtc_vtotal -= 1;
        adjusted_mode->crtc_vblank_start -= 1;
        adjusted_mode->crtc_vblank_end -= 1;
        adjusted_mode->crtc_vsync_end -= 1;
        adjusted_mode->crtc_vsync_start -= 1;
    } else
        pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */

    /* Pipe timing registers pack (value - 1) in low and high halves. */
    I915_WRITE(HTOTAL(pipe),
           (adjusted_mode->crtc_hdisplay - 1) |
           ((adjusted_mode->crtc_htotal - 1) << 16));
    I915_WRITE(HBLANK(pipe),
           (adjusted_mode->crtc_hblank_start - 1) |
           ((adjusted_mode->crtc_hblank_end - 1) << 16));
    I915_WRITE(HSYNC(pipe),
           (adjusted_mode->crtc_hsync_start - 1) |
           ((adjusted_mode->crtc_hsync_end - 1) << 16));

    I915_WRITE(VTOTAL(pipe),
           (adjusted_mode->crtc_vdisplay - 1) |
           ((adjusted_mode->crtc_vtotal - 1) << 16));
    I915_WRITE(VBLANK(pipe),
           (adjusted_mode->crtc_vblank_start - 1) |
           ((adjusted_mode->crtc_vblank_end - 1) << 16));
    I915_WRITE(VSYNC(pipe),
           (adjusted_mode->crtc_vsync_start - 1) |
           ((adjusted_mode->crtc_vsync_end - 1) << 16));

    /* pipesrc controls the size that is scaled from, which should
     * always be the user's requested size.
     */
    I915_WRITE(PIPESRC(pipe),
           ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));

    /* FDI data/link M/N values computed by ironlake_compute_m_n(). */
    I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
    I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
    I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
    I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);

    if (has_edp_encoder &&
        !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
        /* CPU eDP uses its own PLL instead of the PCH DPLL. */
        ironlake_set_pll_edp(crtc, adjusted_mode->clock);
    }

    I915_WRITE(PIPECONF(pipe), pipeconf);
    POSTING_READ(PIPECONF(pipe));

    intel_wait_for_vblank(dev, pipe);

    if (IS_GEN5(dev)) {
        /* enable address swizzle for tiling buffer */
        temp = I915_READ(DISP_ARB_CTL);
        I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
    }

    I915_WRITE(DSPCNTR(plane), dspcntr);
    POSTING_READ(DSPCNTR(plane));

    ret = intel_pipe_set_base(crtc, x, y, old_fb);

    intel_update_watermarks(dev);

    return ret;
}
5585
 
2330 Serge 5586
static int intel_crtc_mode_set(struct drm_crtc *crtc,
5587
			       struct drm_display_mode *mode,
5588
			       struct drm_display_mode *adjusted_mode,
5589
			       int x, int y,
5590
			       struct drm_framebuffer *old_fb)
5591
{
5592
	struct drm_device *dev = crtc->dev;
5593
	struct drm_i915_private *dev_priv = dev->dev_private;
5594
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5595
	int pipe = intel_crtc->pipe;
5596
	int ret;
2327 Serge 5597
 
2330 Serge 5598
//	drm_vblank_pre_modeset(dev, pipe);
2327 Serge 5599
 
2330 Serge 5600
	ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
5601
					      x, y, old_fb);
2327 Serge 5602
 
2330 Serge 5603
//	drm_vblank_post_modeset(dev, pipe);
2327 Serge 5604
 
2330 Serge 5605
	intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
2327 Serge 5606
 
2330 Serge 5607
	return ret;
5608
}
2327 Serge 5609
 
5610
/** Loads the palette/gamma unit for the CRTC with the prepared values */
void intel_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int palreg = PALETTE(intel_crtc->pipe);
	int i;

	/* The clocks have to be on to load the palette. */
	if (!crtc->enabled)
		return;

	/* use legacy palette for Ironlake */
	if (HAS_PCH_SPLIT(dev))
		palreg = LGC_PALETTE(intel_crtc->pipe);

	/* Each of the 256 entries packs 8-bit R/G/B into one 32-bit
	 * register: red in bits 23:16, green in 15:8, blue in 7:0. */
	for (i = 0; i < 256; i++) {
		I915_WRITE(palreg + 4 * i,
			   (intel_crtc->lut_r[i] << 16) |
			   (intel_crtc->lut_g[i] << 8) |
			   intel_crtc->lut_b[i]);
	}
}
5634
 
5635
 
5636
 
5637
 
5638
 
5639
 
5640
 
5641
 
5642
 
5643
 
5644
 
5645
 
5646
 
5647
 
5648
 
5649
 
5650
 
5651
 
5652
 
5653
 
5654
 
5655
 
5656
 
5657
 
5658
 
5659
 
5660
 
5661
 
5662
 
5663
 
5664
 
5665
 
5666
 
5667
 
5668
 
5669
 
5670
 
2332 Serge 5671
/** Sets the color ramps on behalf of RandR */
5672
void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
5673
				 u16 blue, int regno)
5674
{
5675
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2327 Serge 5676
 
2332 Serge 5677
	intel_crtc->lut_r[regno] = red >> 8;
5678
	intel_crtc->lut_g[regno] = green >> 8;
5679
	intel_crtc->lut_b[regno] = blue >> 8;
5680
}
2327 Serge 5681
 
2332 Serge 5682
/* Read back one gamma LUT entry, widening the stored 8-bit channels
 * to the 16-bit range RandR expects (value << 8). */
void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
			     u16 *blue, int regno)
{
	struct intel_crtc *ic = to_intel_crtc(crtc);

	*red   = ic->lut_r[regno] << 8;
	*green = ic->lut_g[regno] << 8;
	*blue  = ic->lut_b[regno] << 8;
}
2327 Serge 5691
 
2330 Serge 5692
/*
 * Update a window [start, start+size) of the software gamma LUT from
 * 16-bit per-channel ramps (clamped to the 256-entry table), then push
 * the whole table to the hardware.
 */
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				 u16 *blue, uint32_t start, uint32_t size)
{
	struct intel_crtc *ic = to_intel_crtc(crtc);
	int i;
	int last = start + size;

	if (last > 256)
		last = 256;

	for (i = start; i < last; i++) {
		ic->lut_r[i] = red[i] >> 8;
		ic->lut_g[i] = green[i] >> 8;
		ic->lut_b[i] = blue[i] >> 8;
	}

	intel_crtc_load_lut(crtc);
}
2327 Serge 5706
 
2330 Serge 5707
/**
5708
 * Get a pipe with a simple mode set on it for doing load-based monitor
5709
 * detection.
5710
 *
5711
 * It will be up to the load-detect code to adjust the pipe as appropriate for
5712
 * its requirements.  The pipe will be connected to no other encoders.
5713
 *
5714
 * Currently this code will only succeed if there is a pipe with no encoders
5715
 * configured for it.  In the future, it could choose to temporarily disable
5716
 * some outputs to free up a pipe for its use.
5717
 *
5718
 * \return crtc, or NULL if no pipes are available.
5719
 */
2327 Serge 5720
 
2330 Serge 5721
/* VESA 640x480x72Hz mode to set on the pipe */
/* Fixed timing used by intel_get_load_detect_pipe() when the caller
 * supplies no mode of its own. */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
2327 Serge 5726
 
5727
 
5728
 
5729
 
5730
 
2330 Serge 5731
static u32
5732
intel_framebuffer_pitch_for_width(int width, int bpp)
5733
{
5734
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
5735
	return ALIGN(pitch, 64);
5736
}
2327 Serge 5737
 
2330 Serge 5738
static u32
5739
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
5740
{
5741
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
5742
	return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
5743
}
2327 Serge 5744
 
2330 Serge 5745
/*
 * Allocate a GEM-backed framebuffer sized for @mode.
 *
 * NOTE(review): the GEM allocation path is not ported yet — the real
 * body is commented out and this stub always returns ERR_PTR(-ENOMEM),
 * so callers fall into their allocation-failure path.
 */
static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
				  struct drm_display_mode *mode,
				  int depth, int bpp)
{
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd mode_cmd;

//	obj = i915_gem_alloc_object(dev,
//				    intel_framebuffer_size_for_mode(mode, bpp));
//	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

//	mode_cmd.width = mode->hdisplay;
//	mode_cmd.height = mode->vdisplay;
//	mode_cmd.depth = depth;
//	mode_cmd.bpp = bpp;
//	mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp);

//	return intel_framebuffer_create(dev, &mode_cmd, obj);
}
2327 Serge 5766
 
2330 Serge 5767
/*
 * Check whether the fbdev framebuffer is large enough to back @mode
 * and return it if so.
 *
 * NOTE(review): the fbdev reuse path is not ported — the checks are
 * commented out and this stub always returns NULL, forcing
 * intel_get_load_detect_pipe() to allocate a temporary framebuffer.
 */
static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
		   struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

//	if (dev_priv->fbdev == NULL)
//		return NULL;

//	obj = dev_priv->fbdev->ifb.obj;
//	if (obj == NULL)
//		return NULL;

//	fb = &dev_priv->fbdev->ifb.base;
//	if (fb->pitch < intel_framebuffer_pitch_for_width(mode->hdisplay,
//							  fb->bits_per_pixel))
		return NULL;

//	if (obj->base.size < mode->vdisplay * fb->pitch)
//		return NULL;

//	return fb;
}
2327 Serge 5792
 
2330 Serge 5793
/*
 * Acquire a pipe for load-based monitor detection and program @mode
 * (or the 640x480 load_detect_mode fallback) on it.
 *
 * On success, @old records what must be restored later by
 * intel_release_load_detect_pipe(): the previous dpms mode, whether a
 * temporary crtc/fb binding was created (load_detect_temp), and any
 * temporary framebuffer to destroy (release_fb).
 *
 * Returns true if a pipe is up and running for detection.
 */
bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
				struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old)
{
	struct intel_crtc *intel_crtc;
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *old_fb;
	int i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		intel_crtc = to_intel_crtc(crtc);
		old->dpms_mode = intel_crtc->dpms_mode;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
			struct drm_encoder_helper_funcs *encoder_funcs;
			struct drm_crtc_helper_funcs *crtc_funcs;

			crtc_funcs = crtc->helper_private;
			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);

			encoder_funcs = encoder->helper_private;
			encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
		}

		return true;
	}

	/* Find an unused one (if possible) */
	/* i tracks the crtc's position for the possible_crtcs bitmask. */
	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (!possible_crtc->enabled) {
			crtc = possible_crtc;
			break;
		}
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		return false;
	}

	encoder->crtc = crtc;
	connector->encoder = encoder;

	intel_crtc = to_intel_crtc(crtc);
	old->dpms_mode = intel_crtc->dpms_mode;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	if (!mode)
		mode = &load_detect_mode;

	old_fb = crtc->fb;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	/* NOTE(review): in this port both helpers are stubs — fbdev reuse
	 * always fails and fb creation returns ERR_PTR(-ENOMEM), so this
	 * path currently always bails out below; verify once ported. */
	crtc->fb = mode_fits_in_fbdev(dev, mode);
	if (crtc->fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
		old->release_fb = crtc->fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(crtc->fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		crtc->fb = old_fb;
		return false;
	}

	if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		crtc->fb = old_fb;
		return false;
	}

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	return true;
}
2327 Serge 5908
 
2330 Serge 5909
/*
 * Undo intel_get_load_detect_pipe(): tear down a temporary
 * crtc/connector binding (and destroy any temporary fb), or restore
 * the previous dpms state if an existing pipe was borrowed.
 */
void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
				    struct drm_connector *connector,
				    struct intel_load_detect_pipe *old)
{
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_device *dev = encoder->dev;
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	/* Temporary binding: detach the connector and let the helper turn
	 * off everything that is now unused. */
	if (old->load_detect_temp) {
		connector->encoder = NULL;
		drm_helper_disable_unused_functions(dev);

		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);

		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON) {
		encoder_funcs->dpms(encoder, old->dpms_mode);
		crtc_funcs->dpms(crtc, old->dpms_mode);
	}
}
2327 Serge 5939
 
2330 Serge 5940
/* Returns the clock of the currently programmed mode of the given pipe. */
/*
 * Reverse-engineer the pixel clock (in kHz, via clock.dot) from the
 * DPLL/FP register contents. The divisor encodings differ per
 * generation (gen2 vs. later, Pineview special-cased); the reference
 * clock is assumed, not read back (see XXX notes below).
 * Returns 0 if the DPLL mode field is unrecognised.
 */
static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 dpll = I915_READ(DPLL(pipe));
	u32 fp;
	intel_clock_t clock;

	/* Pick whichever FP register the DPLL is currently using. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = I915_READ(FP0(pipe));
	else
		fp = I915_READ(FP1(pipe));

	/* Decode the M/N divisors (Pineview packs them differently). */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* P1 is stored one-hot; ffs() recovers the divisor. */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return 0;
		}

		/* XXX: Handle the 100Mhz refclk */
		intel_clock(dev, 96000, &clock);
	} else {
		/* Gen2: infer LVDS from pipe B and decode its own layout. */
		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
			clock.p2 = 14;

			if ((dpll & PLL_REF_INPUT_MASK) ==
			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
				/* XXX: might not be 66MHz */
				intel_clock(dev, 66000, &clock);
			} else
				intel_clock(dev, 48000, &clock);
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;

			intel_clock(dev, 48000, &clock);
		}
	}

	/* XXX: It would be nice to validate the clocks, but we can't reuse
	 * i830PllIsValid() because it relies on the xf86_config connector
	 * configuration being accurate, which it isn't necessarily.
	 */

	return clock.dot;
}
2327 Serge 6026
 
2330 Serge 6027
/** Returns the currently programmed mode of the given pipe. */
/*
 * Builds a freshly allocated drm_display_mode from the pipe timing
 * registers; returns NULL on allocation failure. Ownership of the
 * returned mode passes to the caller (it was kzalloc'd here).
 */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct drm_display_mode *mode;
	int htot = I915_READ(HTOTAL(pipe));
	int hsync = I915_READ(HSYNC(pipe));
	int vtot = I915_READ(VTOTAL(pipe));
	int vsync = I915_READ(VSYNC(pipe));

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	mode->clock = intel_crtc_clock_get(dev, crtc);
	/* Timing registers store (value - 1), packed as two 16-bit
	 * halves: active/start in bits 15:0, total/end in bits 31:16. */
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);
	drm_mode_set_crtcinfo(mode, 0);

	return mode;
}
6059
 
6060
#define GPU_IDLE_TIMEOUT 500 /* ms */
6061
 
6062
 
6063
 
6064
 
6065
#define CRTC_IDLE_TIMEOUT 1000 /* ms */
6066
 
6067
 
6068
 
6069
 
2327 Serge 6070
/*
 * Switch an LVDS pipe back to its full (non-downclocked) dot clock by
 * clearing DISPLAY_RATE_SELECT_FPA1 in the DPLL, with the panel
 * registers temporarily unlocked. No-op on PCH-split hardware or when
 * no LVDS downclock is available.
 * NOTE(review): the idle-timer re-arm at the end is stubbed out in
 * this port, so automatic downclocking never gets rescheduled here.
 */
static void intel_increase_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int dpll_reg = DPLL(pipe);
	int dpll;

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	dpll = I915_READ(dpll_reg);
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
		DRM_DEBUG_DRIVER("upclocking LVDS\n");

		/* Unlock panel regs */
		I915_WRITE(PP_CONTROL,
			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);

		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		/* Let the change take effect over a frame before verifying. */
		intel_wait_for_vblank(dev, pipe);

		dpll = I915_READ(dpll_reg);
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");

		/* ...and lock them again */
		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
	}

	/* Schedule downclock */
//	mod_timer(&intel_crtc->idle_timer, jiffies +
//		  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
}
6109
 
6110
 
6111
 
6112
 
6113
 
6114
 
6115
 
6116
 
6117
 
6118
 
6119
 
6120
 
6121
 
6122
 
6123
 
6124
 
6125
 
6126
 
6127
 
6128
 
6129
 
6130
 
2330 Serge 6131
/*
 * Tear down a CRTC: detach any pending page-flip work under the event
 * lock (so the interrupt path cannot race us), free it, then release
 * the DRM core state and the intel_crtc itself.
 */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_unpin_work *work;
	unsigned long flags;

	/* Claim the unpin work atomically before freeing it. */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (work) {
		/* NOTE(review): cancel_work_sync is stubbed out in this
		 * port — confirm no worker can still reference 'work'. */
//		cancel_work_sync(&work->work);
		kfree(work);
	}

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}
2327 Serge 6152
 
6153
 
6154
 
6155
 
6156
 
6157
 
6158
 
6159
 
6160
 
6161
 
6162
 
6163
 
6164
 
6165
 
6166
 
6167
 
6168
 
6169
 
6170
 
6171
 
6172
 
6173
 
6174
 
6175
 
6176
 
6177
 
6178
 
6179
 
6180
 
6181
 
6182
 
6183
 
6184
 
6185
 
6186
 
6187
 
6188
 
6189
 
6190
 
6191
 
6192
 
6193
 
6194
 
6195
 
6196
 
6197
 
6198
 
6199
 
6200
 
6201
 
6202
 
6203
 
6204
 
6205
 
6206
 
6207
 
6208
 
6209
 
6210
 
6211
 
6212
 
6213
 
6214
 
6215
 
6216
 
6217
 
2330 Serge 6218
/*
 * Fix up plane->pipe routing left behind by the BIOS/bootloader: if
 * the given display plane is enabled but scanning out from the other
 * pipe, disable that plane and pipe so later modesets start from a
 * consistent state. No-op on PCH-split hardware.
 */
static void intel_sanitize_modesetting(struct drm_device *dev,
				       int pipe, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg, val;

	if (HAS_PCH_SPLIT(dev))
		return;

	/* Who knows what state these registers were left in by the BIOS or
	 * grub?
	 *
	 * If we leave the registers in a conflicting state (e.g. with the
	 * display plane reading from the other pipe than the one we intend
	 * to use) then when we attempt to teardown the active mode, we will
	 * not disable the pipes and planes in the correct order -- leaving
	 * a plane reading from a disabled pipe and possibly leading to
	 * undefined behaviour.
	 */

	reg = DSPCNTR(plane);
	val = I915_READ(reg);

	if ((val & DISPLAY_PLANE_ENABLE) == 0)
		return;
	if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
		return;

	/* This display plane is active and attached to the other CPU pipe. */
	pipe = !pipe;

	/* Disable the plane and wait for it to stop reading from the pipe. */
	intel_disable_plane(dev_priv, plane, pipe);
	intel_disable_pipe(dev_priv, pipe);
}
2327 Serge 6253
 
2330 Serge 6254
static void intel_crtc_reset(struct drm_crtc *crtc)
6255
{
6256
	struct drm_device *dev = crtc->dev;
6257
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2327 Serge 6258
 
2330 Serge 6259
	/* Reset flags back to the 'unknown' status so that they
6260
	 * will be correctly set on the initial modeset.
6261
	 */
6262
	intel_crtc->dpms_mode = -1;
2327 Serge 6263
 
2330 Serge 6264
	/* We need to fix up any BIOS configuration that conflicts with
6265
	 * our expectations.
6266
	 */
6267
	intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
6268
}
2327 Serge 6269
 
2330 Serge 6270
/* CRTC helper vtable shared by all pipes. Note it is intentionally
 * non-const: intel_crtc_init() patches in per-platform .prepare and
 * .commit hooks (ironlake vs. i9xx). */
static struct drm_crtc_helper_funcs intel_helper_funcs = {
	.dpms = intel_crtc_dpms,
	.mode_fixup = intel_crtc_mode_fixup,
	.mode_set = intel_crtc_mode_set,
	.mode_set_base = intel_pipe_set_base,
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.load_lut = intel_crtc_load_lut,
	.disable = intel_crtc_disable,
};
2327 Serge 6279
 
2330 Serge 6280
/* Core DRM CRTC vtable. Cursor and page-flip entry points are not
 * ported yet and are left commented out (DRM treats them as absent). */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.reset = intel_crtc_reset,
//	.cursor_set = intel_crtc_cursor_set,
//	.cursor_move = intel_crtc_cursor_move,
	.gamma_set = intel_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.destroy = intel_crtc_destroy,
//	.page_flip = intel_crtc_page_flip,
};
2327 Serge 6289
 
2330 Serge 6290
/*
 * Allocate and register one CRTC for @pipe: identity gamma LUT,
 * pipe/plane assignment (swapped on mobile gen3 for FBC), device
 * lookup tables, and the per-platform helper hooks.
 * Silently returns on allocation failure (the CRTC simply won't exist).
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	int i;

    ENTER();

	/* Trailing space for INTELFB_CONN_LIMIT connector pointers. */
	intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);

	/* Start with an identity gamma ramp. */
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
	for (i = 0; i < 256; i++) {
		intel_crtc->lut_r[i] = i;
		intel_crtc->lut_g[i] = i;
		intel_crtc->lut_b[i] = i;
	}

	/* Swap pipes & planes for FBC on pre-965 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (IS_MOBILE(dev) && IS_GEN3(dev)) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	intel_crtc_reset(&intel_crtc->base);
	intel_crtc->active = true; /* force the pipe off on setup_init_config */
	intel_crtc->bpp = 24; /* default for pre-Ironlake */

	/* Patch the shared helper vtable with per-platform hooks. */
	if (HAS_PCH_SPLIT(dev)) {
		intel_helper_funcs.prepare = ironlake_crtc_prepare;
		intel_helper_funcs.commit = ironlake_crtc_commit;
	} else {
		intel_helper_funcs.prepare = i9xx_crtc_prepare;
		intel_helper_funcs.commit = i9xx_crtc_commit;
	}

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_crtc->busy = false;

    LEAVE();

	/* NOTE(review): idle-timer based downclocking is not ported. */
//	setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
//		    (unsigned long)intel_crtc);
}
2327 Serge 6345
 
6346
 
6347
 
6348
 
6349
 
6350
 
6351
 
2330 Serge 6352
static int intel_encoder_clones(struct drm_device *dev, int type_mask)
6353
{
6354
	struct intel_encoder *encoder;
6355
	int index_mask = 0;
6356
	int entry = 0;
2327 Serge 6357
 
2330 Serge 6358
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
6359
		if (type_mask & encoder->clone_mask)
6360
			index_mask |= (1 << entry);
6361
		entry++;
6362
	}
2327 Serge 6363
 
2330 Serge 6364
	return index_mask;
6365
}
2327 Serge 6366
 
2330 Serge 6367
/*
 * Whether eDP on port A is present and usable: mobile part, DP_A
 * detected, and (on gen5) not fused off via the display chicken fuses.
 */
static bool has_edp_a(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_MOBILE(dev))
		return false;

	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
		return false;

	if (IS_GEN5(dev) &&
	    (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}
2327 Serge 6383
 
2330 Serge 6384
/*
 * Probe and register every display output on the device: LVDS, eDP,
 * CRT, then the PCH or native SDVO/HDMI/DP ports depending on
 * generation, and finally compute each encoder's possible_crtcs /
 * possible_clones masks. Probe order matters: SDVO is tried before
 * HDMI/DP on the muxed ports.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;
	bool has_lvds = false;

	if (IS_MOBILE(dev) && !IS_I830(dev))
		has_lvds = intel_lvds_init(dev);
	if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
		/* disable the panel fitter on everything but LVDS */
		I915_WRITE(PFIT_CONTROL, 0);
	}

	if (HAS_PCH_SPLIT(dev)) {
		/* Port D may carry eDP instead of regular DP. */
		dpd_is_edp = intel_dpd_is_edp(dev);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A);

		if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
			intel_dp_init(dev, PCH_DP_D);
	}

	intel_crt_init(dev);

	if (HAS_PCH_SPLIT(dev)) {
		int found;

		if (I915_READ(HDMIB) & PORT_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB);
			if (!found)
				intel_hdmi_init(dev, HDMIB);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B);
		}

		if (I915_READ(HDMIC) & PORT_DETECTED)
			intel_hdmi_init(dev, HDMIC);

		if (I915_READ(HDMID) & PORT_DETECTED)
			intel_hdmi_init(dev, HDMID);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C);

		if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
			intel_dp_init(dev, PCH_DP_D);

	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
		bool found = false;

		if (I915_READ(SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, SDVOB);
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, SDVOB);
			}

			if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
				DRM_DEBUG_KMS("probing DP_B\n");
				intel_dp_init(dev, DP_B);
			}
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, SDVOC);
		}

		if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {

			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, SDVOC);
			}
			if (SUPPORTS_INTEGRATED_DP(dev)) {
				DRM_DEBUG_KMS("probing DP_C\n");
				intel_dp_init(dev, DP_C);
			}
		}

		if (SUPPORTS_INTEGRATED_DP(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED)) {
			DRM_DEBUG_KMS("probing DP_D\n");
			intel_dp_init(dev, DP_D);
		}
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);

	/* NOTE(review): TV-out support is not ported. */
//   if (SUPPORTS_TV(dev))
//       intel_tv_init(dev);

	/* Derive crtc/clone masks now that all encoders are registered. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(dev, encoder->clone_mask);
	}

	/* disable all the possible outputs/crtcs before entering KMS mode */
//	drm_helper_disable_unused_functions(dev);
}
6490
 
6491
 
6492
 
6493
 
2327 Serge 6494
/* Mode-config vtable; user framebuffer creation and fbdev poll
 * notification are not ported, so both hooks are left NULL. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = NULL /*intel_user_framebuffer_create*/,
	.output_poll_changed = NULL /*intel_fb_output_poll_changed*/,
};
6498
 
6499
 
6500
 
6501
 
6502
 
6503
 
6504
 
6505
 
6506
 
6507
 
6508
 
6509
 
6510
 
6511
 
6512
 
6513
 
6514
 
6515
 
6516
 
6517
 
6518
 
6519
 
6520
 
6521
 
6522
 
6523
 
6524
 
6525
 
6526
 
6527
 
6528
 
6529
 
2330 Serge 6530
/*
 * Request a new render p-state frequency point @val via MEMSWCTL.
 * Returns false (request dropped) if the previous command is still
 * pending; returns true after issuing the change-frequency command
 * and re-arming the status bit.
 */
bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	/* Issue the CHFREQ command with the requested frequency point. */
	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}
2327 Serge 6551
 
2330 Serge 6552
/*
 * Enable Ironlake DRPS (dynamic render p-state): program RC evaluation
 * intervals and thresholds, derive the min/max/start frequency points
 * from MEMMODECTL, switch the memory controller into software
 * frequency mode, and request the start frequency via
 * ironlake_set_drps(). Interrupt enabling itself happens later in
 * ironlake_irq_postinstall.
 * NOTE(review): the last_time1/last_time2 timestamp bookkeeping is
 * stubbed out in this port.
 */
void ironlake_enable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL);
	u8 fmax, fmin, fstart, vstart;

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->fmax = fmax; /* IPS callback will increase this */
	dev_priv->fstart = fstart;

	dev_priv->max_delay = fstart;
	dev_priv->min_delay = fmin;
	dev_priv->cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	/* Hand frequency control to software. */
	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	msleep(1);

	ironlake_set_drps(dev, fstart);

	/* Baseline the busy counters used by the power-tracking code. */
	dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
		I915_READ(0x112e0);
//   dev_priv->last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->last_count2 = I915_READ(0x112f4);
//   getrawmonotonic(&dev_priv->last_time2);
}
2327 Serge 6615
 
6616
 
6617
 
6618
 
6619
 
6620
 
6621
 
6622
 
6623
 
6624
 
6625
 
6626
 
6627
 
6628
 
6629
 
2330 Serge 6630
/*
 * Decode one PXVFREQ table entry into a frequency value.
 *
 * The register encodes a divider (bits 21:16), a power-of-two post
 * divider exponent (bits 13:12) and a pre divider (bits 2:0) applied to
 * the 133.333MHz reference clock.
 *
 * Returns 0 for an unprogrammed entry (pre divider of zero), which also
 * avoids a division by zero.
 *
 * Fix: the divisor expression was truncated in this copy ("((1<");
 * restored to the canonical ((1 << post) * pre).
 */
static unsigned long intel_pxfreq(u32 vidfreq)
{
	unsigned long freq;
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	freq = ((div * 133333) / ((1 << post) * pre));

	return freq;
}
2327 Serge 6644
 
2330 Serge 6645
/*
 * Program the energy monitor (EMON) unit: event energy weights and
 * per-P-state weights derived from the PXVFREQ table, then enable
 * monitoring.  Also caches the LCFUSE correction factor in
 * dev_priv->corr for later power readings.
 */
void intel_init_emon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lcfuse;
	u8 pxw[16];	/* one weight byte per P-state slot */
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	/* Zero the remaining event weight registers */
	for (i = 0; i < 5; i++)
		I915_WRITE(PEW + (i * 4), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW + (i * 4), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		/* Weight ~ vid^2 * freq, scaled to fit a byte */
		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	/* Pack four weight bytes per 32-bit PXW register */
	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW + (i * 4), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL + (i * 4), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
6715
 
6716
/*
 * Enable RPS (render P-states / turbo) and optionally RC6 on gen6+.
 *
 * The sequence is order-sensitive: disable RC control, program wake
 * rate limits and residency thresholds, enable RC, program the turbo
 * request and up/down thresholds, then use the PCU mailbox to push the
 * min-frequency table and query overclocking parameters.  Runs with
 * struct_mutex held and forcewake asserted.
 */
void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
	u32 pcu_mbox, rc6_mask = 0;
	int cur_freq, min_freq, max_freq;
	int i;

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	I915_WRITE(GEN6_RC_STATE, 0);
	mutex_lock(&dev_priv->dev->struct_mutex);
	gen6_gt_force_wake_get(dev_priv);

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	/* Per-ring idle count before RC state can be entered */
	for (i = 0; i < I915_NUM_RINGS; i++)
		I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	/* RC6 is opt-in via the i915_enable_rc6 module parameter */
	if (i915_enable_rc6)
		rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
			GEN6_RC_CTL_RC6_ENABLE;

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	I915_WRITE(GEN6_RPNSWREQ,
		   GEN6_FREQUENCY(10) |
		   GEN6_OFFSET(0) |
		   GEN6_AGGRESSIVE_TURBO);
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   GEN6_FREQUENCY(12));

	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   18 << 24 |
		   6 << 16);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
	I915_WRITE(GEN6_RP_UP_EI, 100000);
	I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_USE_NORMAL_FREQ |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	/* Push an (empty) min frequency table through the PCU mailbox */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");

	I915_WRITE(GEN6_PCODE_DATA, 0);
	I915_WRITE(GEN6_PCODE_MAILBOX,
		   GEN6_PCODE_READY |
		   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");

	min_freq = (rp_state_cap & 0xff0000) >> 16;
	max_freq = rp_state_cap & 0xff;
	cur_freq = (gt_perf_status & 0xff00) >> 8;

	/* Check for overclock support */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
	pcu_mbox = I915_READ(GEN6_PCODE_DATA);
	if (pcu_mbox & (1<<31)) { /* OC supported */
		max_freq = pcu_mbox & 0xff;
		/* NOTE(review): the debug print scales the raw mailbox word
		 * (flag bits included) rather than max_freq, so the reported
		 * MHz value can be misleading. */
		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
	}

	/* In units of 100MHz */
	dev_priv->max_delay = max_freq;
	dev_priv->min_delay = min_freq;
	dev_priv->cur_delay = cur_freq;

	/* requires MSI enabled */
	I915_WRITE(GEN6_PMIER,
		   GEN6_PM_MBOX_EVENT |
		   GEN6_PM_THERMAL_EVENT |
		   GEN6_PM_RP_DOWN_TIMEOUT |
		   GEN6_PM_RP_UP_THRESHOLD |
		   GEN6_PM_RP_DOWN_THRESHOLD |
		   GEN6_PM_RP_UP_EI_EXPIRED |
		   GEN6_PM_RP_DOWN_EI_EXPIRED);
	/* NOTE(port): rps_lock locking around PMIMR is stubbed out here --
	 * safe only while no RPS interrupt handler runs concurrently;
	 * TODO confirm for this kernel. */
//   spin_lock_irq(&dev_priv->rps_lock);
//   WARN_ON(dev_priv->pm_iir != 0);
	I915_WRITE(GEN6_PMIMR, 0);
//   spin_unlock_irq(&dev_priv->rps_lock);
	/* enable all PM interrupts */
	I915_WRITE(GEN6_PMINTRMSK, 0);

	gen6_gt_force_wake_put(dev_priv);
	mutex_unlock(&dev_priv->dev->struct_mutex);
}
6839
 
6840
/*
 * Program the PCU with a ring (uncore) frequency to pair with each
 * possible GPU frequency, scaled from the CPU's maximum frequency.
 *
 * NOTE(port): cpufreq_quick_get_max() is stubbed out in this port; the
 * CPU max frequency is hard-coded to 3 GHz -- TODO: use the real
 * TSC/cpufreq value once available.
 */
void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
	int min_freq = 15;	/* GPU freq cutoff, presumably in 50MHz units (~750MHz) */
	int gpu_freq, ia_freq, max_ia_freq;
	int scaling_factor = 180;

//   max_ia_freq = cpufreq_quick_get_max(0);
	/*
	 * Default to measured freq if none found, PCU will ensure we don't go
	 * over
	 */
//   if (!max_ia_freq)
		max_ia_freq = 3000000; //tsc_khz;

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	mutex_lock(&dev_priv->dev->struct_mutex);

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access.  We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
	     gpu_freq--) {
		int diff = dev_priv->max_delay - gpu_freq;

		/*
		 * For GPU frequencies less than 750MHz, just use the lowest
		 * ring freq.
		 */
		if (gpu_freq < min_freq)
			ia_freq = 800;
		else
			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
		ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);

		/* One mailbox transaction per table entry */
		I915_WRITE(GEN6_PCODE_DATA,
			   (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
			   gpu_freq);
		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
			   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
			      GEN6_PCODE_READY) == 0, 10)) {
			DRM_ERROR("pcode write of freq table timed out\n");
			continue;
		}
	}

	mutex_unlock(&dev_priv->dev->struct_mutex);
}
6892
 
2327 Serge 6893
/*
 * Ironlake (gen5): configure display clock gating plus the chicken
 * bits required for FBC, memory self-refresh and CxSR, and clear the
 * LP watermarks.
 */
static void ironlake_init_clock_gating(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

    /* Required for FBC */
    dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
        DPFCRUNIT_CLOCK_GATE_DISABLE |
        DPFDUNIT_CLOCK_GATE_DISABLE;
    /* Required for CxSR */
    dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;

    I915_WRITE(PCH_3DCGDIS0,
           MARIUNIT_CLOCK_GATE_DISABLE |
           SVSMUNIT_CLOCK_GATE_DISABLE);
    I915_WRITE(PCH_3DCGDIS1,
           VFMUNIT_CLOCK_GATE_DISABLE);

    I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

    /*
     * According to the spec the following bits should be set in
     * order to enable memory self-refresh
     * The bit 22/21 of 0x42004
     * The bit 5 of 0x42020
     * The bit 15 of 0x45000
     */
    I915_WRITE(ILK_DISPLAY_CHICKEN2,
           (I915_READ(ILK_DISPLAY_CHICKEN2) |
            ILK_DPARB_GATE | ILK_VSDPFD_FULL));
    I915_WRITE(ILK_DSPCLK_GATE,
           (I915_READ(ILK_DSPCLK_GATE) |
            ILK_DPARB_CLK_GATE));
    I915_WRITE(DISP_ARB_CTL,
           (I915_READ(DISP_ARB_CTL) |
            DISP_FBC_WM_DIS));
    /* Clear the low-power watermarks; recomputed later by update_wm */
    I915_WRITE(WM3_LP_ILK, 0);
    I915_WRITE(WM2_LP_ILK, 0);
    I915_WRITE(WM1_LP_ILK, 0);

    /*
     * Based on the document from hardware guys the following bits
     * should be set unconditionally in order to enable FBC.
     * The bit 22 of 0x42000
     * The bit 22 of 0x42004
     * The bit 7,8,9 of 0x42020.
     */
    if (IS_IRONLAKE_M(dev)) {
        I915_WRITE(ILK_DISPLAY_CHICKEN1,
               I915_READ(ILK_DISPLAY_CHICKEN1) |
               ILK_FBCQ_DIS);
        I915_WRITE(ILK_DISPLAY_CHICKEN2,
               I915_READ(ILK_DISPLAY_CHICKEN2) |
               ILK_DPARB_GATE);
        I915_WRITE(ILK_DSPCLK_GATE,
               I915_READ(ILK_DSPCLK_GATE) |
               ILK_DPFC_DIS1 |
               ILK_DPFC_DIS2 |
               ILK_CLK_FBC);
    }

    I915_WRITE(ILK_DISPLAY_CHICKEN2,
           I915_READ(ILK_DISPLAY_CHICKEN2) |
           ILK_ELPIN_409_SELECT);
    I915_WRITE(_3D_CHICKEN2,
           _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
           _3D_CHICKEN2_WM_READ_PIPELINED);
}
6961
 
6962
/*
 * Sandybridge (gen6): clock gating, self-refresh/FBC chicken bits,
 * and per-pipe trickle feed disable.
 */
static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* Clear the low-power watermarks; recomputed later by update_wm */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE,
		   I915_READ(ILK_DSPCLK_GATE) |
		   ILK_DPARB_CLK_GATE  |
		   ILK_DPFD_CLK_GATE);

	/* Disable trickle feed on every pipe and flush the plane regs */
	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}
7005
 
7006
/*
 * Ivybridge (gen7): minimal clock gating setup -- VRHUNIT gating off,
 * watermarks cleared, and per-pipe trickle feed disabled.
 */
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	/* Clear the low-power watermarks; recomputed later by update_wm */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);

	/* Disable trickle feed on every pipe and flush the plane regs */
	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}
7027
 
7028
static void g4x_init_clock_gating(struct drm_device *dev)
7029
{
7030
    struct drm_i915_private *dev_priv = dev->dev_private;
7031
    uint32_t dspclk_gate;
7032
 
7033
    I915_WRITE(RENCLK_GATE_D1, 0);
7034
    I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
7035
           GS_UNIT_CLOCK_GATE_DISABLE |
7036
           CL_UNIT_CLOCK_GATE_DISABLE);
7037
    I915_WRITE(RAMCLK_GATE_D, 0);
7038
    dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
7039
        OVRUNIT_CLOCK_GATE_DISABLE |
7040
        OVCUNIT_CLOCK_GATE_DISABLE;
7041
    if (IS_GM45(dev))
7042
        dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
7043
    I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
7044
}
7045
 
7046
/*
 * Crestline (965GM): disable render cache clock gating and clear the
 * remaining clock gating control registers.
 */
static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	/* DEUC is a 16-bit register, hence I915_WRITE16 */
	I915_WRITE16(DEUC, 0);
}
7056
 
7057
/*
 * Broadwater (965G): keep the render cache, pixel backend, sampler and
 * FBC units out of clock gating.
 */
static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
}
7068
 
7069
static void gen3_init_clock_gating(struct drm_device *dev)
7070
{
7071
    struct drm_i915_private *dev_priv = dev->dev_private;
7072
    u32 dstate = I915_READ(D_STATE);
7073
 
7074
    dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
7075
        DSTATE_DOT_CLOCK_GATING;
7076
    I915_WRITE(D_STATE, dstate);
7077
}
7078
 
7079
/* i85x: keep the SV unit out of clock gating. */
static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
}
7085
 
7086
/* i830: keep the overlay unit out of clock gating. */
static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
7092
 
7093
/* Ibex Peak PCH clock gating setup. */
static void ibx_init_clock_gating(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    /*
     * On Ibex Peak and Cougar Point, we need to disable clock
     * gating for the panel power sequencer or it will fail to
     * start up when no ports are active.
     */
    I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
7104
 
7105
/* Cougar Point PCH clock gating setup. */
static void cpt_init_clock_gating(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    int pipe;

    /*
     * On Ibex Peak and Cougar Point, we need to disable clock
     * gating for the panel power sequencer or it will fail to
     * start up when no ports are active.
     */
    I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
    I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
           DPLS_EDP_PPS_FIX_DIS);
    /* Without this, mode sets may fail silently on FDI */
    for_each_pipe(pipe)
        I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
}
7122
 
2332 Serge 7123
/*
 * Forget the RC6 render and power context pages.
 *
 * NOTE(port): the GEM unpin/unreference calls are stubbed out in this
 * port, so the objects are only forgotten here, not released -- TODO
 * confirm the backing pages are reclaimed elsewhere.
 */
static void ironlake_teardown_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->renderctx) {
//		i915_gem_object_unpin(dev_priv->renderctx);
//		drm_gem_object_unreference(&dev_priv->renderctx->base);
		dev_priv->renderctx = NULL;
	}

	if (dev_priv->pwrctx) {
//		i915_gem_object_unpin(dev_priv->pwrctx);
//		drm_gem_object_unreference(&dev_priv->pwrctx->base);
		dev_priv->pwrctx = NULL;
	}
}
2327 Serge 7139
 
2330 Serge 7140
 
2332 Serge 7141
 
7142
 
7143
 
7144
 
7145
 
7146
static int ironlake_setup_rc6(struct drm_device *dev)
7147
{
7148
	struct drm_i915_private *dev_priv = dev->dev_private;
7149
 
7150
	if (dev_priv->renderctx == NULL)
7151
//		dev_priv->renderctx = intel_alloc_context_page(dev);
7152
	if (!dev_priv->renderctx)
7153
		return -ENOMEM;
7154
 
7155
	if (dev_priv->pwrctx == NULL)
7156
//		dev_priv->pwrctx = intel_alloc_context_page(dev);
7157
	if (!dev_priv->pwrctx) {
7158
		ironlake_teardown_rc6(dev);
7159
		return -ENOMEM;
7160
	}
7161
 
7162
	return 0;
7163
}
7164
 
7165
/*
 * Enable RC6 render power saving on Ironlake.
 *
 * Gated on the i915_enable_rc6 module parameter.  In this port the
 * ring-buffer MI_SET_CONTEXT setup is compiled out (#if 0), so only the
 * power context register programming below is active.
 */
void ironlake_enable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* rc6 disabled by default due to repeated reports of hanging during
	 * boot and resume.
	 */
	if (!i915_enable_rc6)
		return;

	mutex_lock(&dev->struct_mutex);
	ret = ironlake_setup_rc6(dev);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return;
	}

	/*
	 * GPU can automatically power down the render unit if given a page
	 * to save state.
	 */
#if 0
	ret = BEGIN_LP_RING(6);
	if (ret) {
		ironlake_teardown_rc6(dev);
		mutex_unlock(&dev->struct_mutex);
		return;
	}

	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
	OUT_RING(MI_SET_CONTEXT);
	OUT_RING(dev_priv->renderctx->gtt_offset |
		 MI_MM_SPACE_GTT |
		 MI_SAVE_EXT_STATE_EN |
		 MI_RESTORE_EXT_STATE_EN |
		 MI_RESTORE_INHIBIT);
	OUT_RING(MI_SUSPEND_FLUSH);
	OUT_RING(MI_NOOP);
	OUT_RING(MI_FLUSH);
	ADVANCE_LP_RING();

	/*
	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
	 * does an implicit flush, combined with MI_FLUSH above, it should be
	 * safe to assume that renderctx is valid
	 */
	ret = intel_wait_ring_idle(LP_RING(dev_priv));
	if (ret) {
		DRM_ERROR("failed to enable ironlake power power savings\n");
		ironlake_teardown_rc6(dev);
		mutex_unlock(&dev->struct_mutex);
		return;
	}
#endif

	/* Point the hardware at the power context page and allow RC6 entry */
	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
	mutex_unlock(&dev->struct_mutex);
}
7225
 
2330 Serge 7226
/*
 * Run the chip-specific clock gating hooks selected by
 * intel_init_display().  The PCH hook is optional and only present on
 * PCH-split platforms.
 */
void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->display.init_clock_gating(dev);

	if (dev_priv->display.init_pch_clock_gating)
		dev_priv->display.init_pch_clock_gating(dev);
}
7235
 
2327 Serge 7236
/*
 * Set up chip specific display functions: fill dev_priv->display with
 * the CRTC, FBC, clock-speed, watermark, FDI-training and clock-gating
 * callbacks appropriate for the detected hardware generation.
 */
static void intel_init_display(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    /* We always want a DPMS function */
    if (HAS_PCH_SPLIT(dev)) {
        dev_priv->display.dpms = ironlake_crtc_dpms;
        dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
        dev_priv->display.update_plane = ironlake_update_plane;
    } else {
        dev_priv->display.dpms = i9xx_crtc_dpms;
        dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
        dev_priv->display.update_plane = i9xx_update_plane;
    }

    /* Frame buffer compression hooks, where the hardware supports it */
    if (I915_HAS_FBC(dev)) {
        if (HAS_PCH_SPLIT(dev)) {
            dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
            dev_priv->display.enable_fbc = ironlake_enable_fbc;
            dev_priv->display.disable_fbc = ironlake_disable_fbc;
        } else if (IS_GM45(dev)) {
            dev_priv->display.fbc_enabled = g4x_fbc_enabled;
            dev_priv->display.enable_fbc = g4x_enable_fbc;
            dev_priv->display.disable_fbc = g4x_disable_fbc;
        } else if (IS_CRESTLINE(dev)) {
            dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
            dev_priv->display.enable_fbc = i8xx_enable_fbc;
            dev_priv->display.disable_fbc = i8xx_disable_fbc;
        }
        /* 855GM needs testing */
    }

    /* Returns the core display clock speed */
    if (IS_I945G(dev) || (IS_G33(dev) && ! IS_PINEVIEW_M(dev)))
        dev_priv->display.get_display_clock_speed =
            i945_get_display_clock_speed;
    else if (IS_I915G(dev))
        dev_priv->display.get_display_clock_speed =
            i915_get_display_clock_speed;
    else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
        dev_priv->display.get_display_clock_speed =
            i9xx_misc_get_display_clock_speed;
    else if (IS_I915GM(dev))
        dev_priv->display.get_display_clock_speed =
            i915gm_get_display_clock_speed;
    else if (IS_I865G(dev))
        dev_priv->display.get_display_clock_speed =
            i865_get_display_clock_speed;
    else if (IS_I85X(dev))
        dev_priv->display.get_display_clock_speed =
            i855_get_display_clock_speed;
    else /* 852, 830 */
        dev_priv->display.get_display_clock_speed =
            i830_get_display_clock_speed;

    /* For FIFO watermark updates */
    if (HAS_PCH_SPLIT(dev)) {
        if (HAS_PCH_IBX(dev))
            dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
        else if (HAS_PCH_CPT(dev))
            dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;

        if (IS_GEN5(dev)) {
            /* Watermarks need valid memory latency values from MLTR */
            if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
                dev_priv->display.update_wm = ironlake_update_wm;
            else {
                DRM_DEBUG_KMS("Failed to get proper latency. "
                          "Disable CxSR\n");
                dev_priv->display.update_wm = NULL;
            }
            dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
            dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
        } else if (IS_GEN6(dev)) {
            if (SNB_READ_WM0_LATENCY()) {
                dev_priv->display.update_wm = sandybridge_update_wm;
            } else {
                DRM_DEBUG_KMS("Failed to read display plane latency. "
                          "Disable CxSR\n");
                dev_priv->display.update_wm = NULL;
            }
            dev_priv->display.fdi_link_train = gen6_fdi_link_train;
            dev_priv->display.init_clock_gating = gen6_init_clock_gating;
        } else if (IS_IVYBRIDGE(dev)) {
            /* FIXME: detect B0+ stepping and use auto training */
            dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
            if (SNB_READ_WM0_LATENCY()) {
                dev_priv->display.update_wm = sandybridge_update_wm;
            } else {
                DRM_DEBUG_KMS("Failed to read display plane latency. "
                          "Disable CxSR\n");
                dev_priv->display.update_wm = NULL;
            }
            dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;

        } else
            dev_priv->display.update_wm = NULL;
    } else if (IS_PINEVIEW(dev)) {
        if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
                        dev_priv->is_ddr3,
                        dev_priv->fsb_freq,
                        dev_priv->mem_freq)) {
            DRM_INFO("failed to find known CxSR latency "
                 "(found ddr%s fsb freq %d, mem freq %d), "
                 "disabling CxSR\n",
                 (dev_priv->is_ddr3 == 1) ? "3": "2",
                 dev_priv->fsb_freq, dev_priv->mem_freq);
            /* Disable CxSR and never update its watermark again */
            pineview_disable_cxsr(dev);
            dev_priv->display.update_wm = NULL;
        } else
            dev_priv->display.update_wm = pineview_update_wm;
        dev_priv->display.init_clock_gating = gen3_init_clock_gating;
    } else if (IS_G4X(dev)) {
        dev_priv->display.update_wm = g4x_update_wm;
        dev_priv->display.init_clock_gating = g4x_init_clock_gating;
    } else if (IS_GEN4(dev)) {
        dev_priv->display.update_wm = i965_update_wm;
        if (IS_CRESTLINE(dev))
            dev_priv->display.init_clock_gating = crestline_init_clock_gating;
        else if (IS_BROADWATER(dev))
            dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
    } else if (IS_GEN3(dev)) {
        dev_priv->display.update_wm = i9xx_update_wm;
        dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
        dev_priv->display.init_clock_gating = gen3_init_clock_gating;
    } else if (IS_I865G(dev)) {
        dev_priv->display.update_wm = i830_update_wm;
        dev_priv->display.init_clock_gating = i85x_init_clock_gating;
        dev_priv->display.get_fifo_size = i830_get_fifo_size;
    } else if (IS_I85X(dev)) {
        dev_priv->display.update_wm = i9xx_update_wm;
        dev_priv->display.get_fifo_size = i85x_get_fifo_size;
        dev_priv->display.init_clock_gating = i85x_init_clock_gating;
    } else {
        dev_priv->display.update_wm = i830_update_wm;
        dev_priv->display.init_clock_gating = i830_init_clock_gating;
        if (IS_845G(dev))
            dev_priv->display.get_fifo_size = i845_get_fifo_size;
        else
            dev_priv->display.get_fifo_size = i830_get_fifo_size;
    }

    /* Default just returns -ENODEV to indicate unsupported */
//    dev_priv->display.queue_flip = intel_default_queue_flip;

    /* NOTE(port): page-flip queueing is not wired up in this port yet */
#if 0
    switch (INTEL_INFO(dev)->gen) {
    case 2:
        dev_priv->display.queue_flip = intel_gen2_queue_flip;
        break;

    case 3:
        dev_priv->display.queue_flip = intel_gen3_queue_flip;
        break;

    case 4:
    case 5:
        dev_priv->display.queue_flip = intel_gen4_queue_flip;
        break;

    case 6:
        dev_priv->display.queue_flip = intel_gen6_queue_flip;
        break;
    case 7:
        dev_priv->display.queue_flip = intel_gen7_queue_flip;
        break;
    }
#endif
}
7406
 
7407
/*
7408
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
7409
 * resume, or other times.  This quirk makes sure that's the case for
7410
 * affected systems.
7411
 */
7412
static void quirk_pipea_force (struct drm_device *dev)
7413
{
7414
    struct drm_i915_private *dev_priv = dev->dev_private;
7415
 
7416
    dev_priv->quirks |= QUIRK_PIPEA_FORCE;
7417
    DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
7418
}
7419
 
7420
/*
7421
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
7422
 */
7423
static void quirk_ssc_force_disable(struct drm_device *dev)
7424
{
7425
    struct drm_i915_private *dev_priv = dev->dev_private;
7426
    dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
7427
}
7428
 
7429
/*
 * PCI-ID keyed quirk table.  PCI_ANY_ID in a subsystem field acts as a
 * wildcard; intel_init_quirks() calls hook() for every matching entry.
 */
struct intel_quirk {
    int device;              /* PCI device id */
    int subsystem_vendor;    /* subsystem vendor id, or PCI_ANY_ID */
    int subsystem_device;    /* subsystem device id, or PCI_ANY_ID */
    void (*hook)(struct drm_device *dev);  /* applied on match */
};

struct intel_quirk intel_quirks[] = {
    /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
    { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
    /* HP Mini needs pipe A force quirk (LP: #322104) */
    { 0x27ae,0x103c, 0x361a, quirk_pipea_force },

    /* Thinkpad R31 needs pipe A force quirk */
    { 0x3577, 0x1014, 0x0505, quirk_pipea_force },
    /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
    { 0x2592, 0x1179, 0x0001, quirk_pipea_force },

    /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
    { 0x3577,  0x1014, 0x0513, quirk_pipea_force },
    /* ThinkPad X40 needs pipe A force quirk */

    /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
    { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

    /* 855 & before need to leave pipe A & dpll A up */
    { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
    { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

    /* Lenovo U160 cannot use SSC on LVDS */
    { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

    /* Sony Vaio Y cannot use SSC on LVDS */
    { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
};
7464
 
7465
static void intel_init_quirks(struct drm_device *dev)
7466
{
7467
    struct pci_dev *d = dev->pdev;
7468
    int i;
7469
 
7470
    for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
7471
        struct intel_quirk *q = &intel_quirks[i];
7472
 
7473
        if (d->device == q->device &&
7474
            (d->subsystem_vendor == q->subsystem_vendor ||
7475
             q->subsystem_vendor == PCI_ANY_ID) &&
7476
            (d->subsystem_device == q->subsystem_device ||
7477
             q->subsystem_device == PCI_ANY_ID))
7478
            q->hook(dev);
7479
    }
7480
}
7481
 
2330 Serge 7482
/* Disable the VGA plane that we never use */
7483
/*
 * Turn off the legacy VGA plane: blank the screen through VGA sequencer
 * register SR01 bit 5, then disable the VGA display via the per-chip
 * VGA control register (CPU side on PCH-split parts).
 *
 * NOTE(port): VGA arbiter get/put calls are stubbed out here.
 */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	u32 vga_reg;

	if (HAS_PCH_SPLIT(dev))
		vga_reg = CPU_VGACNTRL;
	else
		vga_reg = VGACNTRL;

//	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	/* Select SR01 and set its screen-off bit (bit 5) via port I/O */
    out8(VGA_SR_INDEX, 1);
    sr1 = in8(VGA_SR_DATA);
    out8(VGA_SR_DATA,sr1 | 1<<5);
//   vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}
7504
 
2327 Serge 7505
void intel_modeset_init(struct drm_device *dev)
7506
{
7507
    struct drm_i915_private *dev_priv = dev->dev_private;
7508
    int i;
7509
 
7510
    drm_mode_config_init(dev);
7511
 
7512
    dev->mode_config.min_width = 0;
7513
    dev->mode_config.min_height = 0;
7514
 
7515
    dev->mode_config.funcs = (void *)&intel_mode_funcs;
7516
 
7517
    intel_init_quirks(dev);
7518
 
7519
    intel_init_display(dev);
7520
 
7521
    if (IS_GEN2(dev)) {
7522
        dev->mode_config.max_width = 2048;
7523
        dev->mode_config.max_height = 2048;
7524
    } else if (IS_GEN3(dev)) {
7525
        dev->mode_config.max_width = 4096;
7526
        dev->mode_config.max_height = 4096;
7527
    } else {
7528
        dev->mode_config.max_width = 8192;
7529
        dev->mode_config.max_height = 8192;
7530
    }
7531
 
7532
    dev->mode_config.fb_base = get_bus_addr();
7533
 
7534
    DRM_DEBUG_KMS("%d display pipe%s available.\n",
7535
              dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
7536
 
7537
    for (i = 0; i < dev_priv->num_pipe; i++) {
7538
        intel_crtc_init(dev, i);
7539
    }
7540
 
7541
    /* Just disable it once at startup */
7542
    i915_disable_vga(dev);
7543
    intel_setup_outputs(dev);
7544
 
7545
    intel_init_clock_gating(dev);
7546
 
7547
    if (IS_IRONLAKE_M(dev)) {
7548
        ironlake_enable_drps(dev);
7549
        intel_init_emon(dev);
7550
    }
7551
 
7552
    if (IS_GEN6(dev) || IS_GEN7(dev)) {
7553
        gen6_enable_rps(dev_priv);
7554
        gen6_update_ring_freq(dev_priv);
7555
    }
7556
 
2332 Serge 7557
//   INIT_WORK(&dev_priv->idle_work, intel_idle_update);
7558
//   setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
7559
//           (unsigned long)dev);
2330 Serge 7560
}
2327 Serge 7561
 
2332 Serge 7562
/* GEM-dependent modeset setup, run after GEM is initialized. */
void intel_modeset_gem_init(struct drm_device *dev)
{
	/* RC6 is only turned on for Ironlake mobile parts here. */
	if (IS_IRONLAKE_M(dev))
		ironlake_enable_rc6(dev);

//	intel_setup_overlay(dev);
}
7569
 
7570
 
2330 Serge 7571
/*
7572
 * Return which encoder is currently attached for connector.
7573
 */
7574
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
7575
{
7576
	return &intel_attached_encoder(connector)->base;
2327 Serge 7577
}
7578
 
2330 Serge 7579
void intel_connector_attach_encoder(struct intel_connector *connector,
7580
				    struct intel_encoder *encoder)
7581
{
7582
	connector->encoder = encoder;
7583
	drm_mode_connector_attach_encoder(&connector->base,
7584
					  &encoder->base);
7585
}
2327 Serge 7586
 
2330 Serge 7587