Subversion Repositories Kolibri OS

Rev

Rev 2351 | Rev 3031 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2327 Serge 1
/*
2
 * Copyright © 2006-2007 Intel Corporation
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21
 * DEALINGS IN THE SOFTWARE.
22
 *
23
 * Authors:
24
 *  Eric Anholt 
25
 */
26
 
27
//#include 
28
#include 
29
//#include 
30
#include 
31
#include 
2330 Serge 32
#include 
2327 Serge 33
//#include 
2342 Serge 34
#include 
2327 Serge 35
#include "drmP.h"
36
#include "intel_drv.h"
2330 Serge 37
#include "i915_drm.h"
2327 Serge 38
#include "i915_drv.h"
2351 Serge 39
#include "i915_trace.h"
2327 Serge 40
#include "drm_dp_helper.h"
41
#include "drm_crtc_helper.h"
42
 
43
phys_addr_t get_bus_addr(void);
44
 
45
/* Report whether n has exactly one bit set, i.e. is a power of two.
 * Zero is not considered a power of two. */
static inline __attribute__((const))
bool is_power_of_2(unsigned long n)
{
    if (n == 0)
        return false;
    /* A power of two shares no bits with its predecessor. */
    return (n & (n - 1)) == 0;
}
50
 
2330 Serge 51
/* Largest errno value that can be encoded in an error pointer. */
#define MAX_ERRNO       4095

/* True when x falls in the top MAX_ERRNO addresses of the address space,
 * i.e. it encodes a negative errno rather than a valid pointer. */
#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)

/* Return non-zero when ptr is an encoded error value (see ERR_PTR). */
static inline long IS_ERR(const void *ptr)
{
    return IS_ERR_VALUE((unsigned long)ptr);
}
59
 
60
/* Encode a negative errno value as a pointer (inverse of IS_ERR). */
static inline void *ERR_PTR(long error)
{
    void *encoded = (void *) error;
    return encoded;
}
64
 
65
 
2327 Serge 66
/*
 * Port shim for the Linux pci_read_config_word(): read a 16-bit value
 * from the device's PCI configuration space at offset 'where' into *val.
 *
 * NOTE(review): this always returns 1, unlike the Linux original which
 * returns 0 (PCIBIOS_SUCCESSFUL) on success — confirm that callers only
 * use *val and never test the return code for success/failure.
 */
static inline int pci_read_config_word(struct pci_dev *dev, int where,
                    u16 *val)
{
    *val = PciRead16(dev->busnr, dev->devfn, where);
    return 1;
}
72
 
73
 
74
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
75
 
2342 Serge 76
bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
2327 Serge 77
static void intel_update_watermarks(struct drm_device *dev);
78
static void intel_increase_pllclock(struct drm_crtc *crtc);
79
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
80
 
81
/* PLL parameters: the raw divisors plus the clocks computed from them. */
typedef struct {
    /* given values */
    int n;
    int m1, m2;
    int p1, p2;
    /* derived values */
    int dot;
    int vco;
    int m;
    int p;
} intel_clock_t;

/* Inclusive [min, max] range for a single divisor. */
typedef struct {
    int min, max;
} intel_range_t;

/* Post-divider (p2) selection: the find_pll routines pick p2_slow for
 * target clocks below dot_limit and p2_fast at or above it. */
typedef struct {
    int dot_limit;
    int p2_slow, p2_fast;
} intel_p2_t;

#define INTEL_P2_NUM              2
/* Full set of divisor ranges for one platform/output combination, plus
 * the search routine used to pick divisors for a target clock. */
typedef struct intel_limit intel_limit_t;
struct intel_limit {
    intel_range_t   dot, vco, n, m, m1, m2, p, p1;
    intel_p2_t      p2;
    bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
              int, int, intel_clock_t *);
};
110
 
111
/* FDI */
112
#define IRONLAKE_FDI_FREQ       2700000 /* in kHz for mode->clock */
113
 
114
static bool
115
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
116
            int target, int refclk, intel_clock_t *best_clock);
117
static bool
118
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
119
            int target, int refclk, intel_clock_t *best_clock);
120
 
121
static bool
122
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
123
              int target, int refclk, intel_clock_t *best_clock);
124
static bool
125
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
126
               int target, int refclk, intel_clock_t *best_clock);
127
 
128
static inline u32 /* units of 100MHz */
129
intel_fdi_link_freq(struct drm_device *dev)
130
{
131
	if (IS_GEN5(dev)) {
132
		struct drm_i915_private *dev_priv = dev->dev_private;
133
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
134
	} else
135
		return 27;
136
}
137
 
138
/* Gen2 (i8xx) limits, DVO/SDVO outputs. */
static const intel_limit_t intel_limits_i8xx_dvo = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 930000, .max = 1400000 },
        .n = { .min = 3, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
	.find_pll = intel_find_best_PLL,
};

/* Gen2 (i8xx) limits, LVDS. */
static const intel_limit_t intel_limits_i8xx_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 930000, .max = 1400000 },
        .n = { .min = 3, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};

/* Gen3+ (i9xx) limits, SDVO and other non-LVDS outputs. */
static const intel_limit_t intel_limits_i9xx_sdvo = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 10, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

/* Gen3+ (i9xx) limits, LVDS. */
static const intel_limit_t intel_limits_i9xx_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 10, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 7, .max = 98 },
        .p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};


/* G4x limits, SDVO. */
static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4x limits, HDMI and analog (DAC) outputs. */
static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4x limits, single-channel LVDS. */
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4x limits, dual-channel LVDS. */
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
	.find_pll = intel_g4x_find_best_PLL,
};

/* G4x limits, DisplayPort (fixed link frequencies, see find_pll). */
static const intel_limit_t intel_limits_g4x_display_port = {
        .dot = { .min = 161670, .max = 227000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 2 },
        .m = { .min = 97, .max = 108 },
        .m1 = { .min = 0x10, .max = 0x12 },
        .m2 = { .min = 0x05, .max = 0x06 },
        .p = { .min = 10, .max = 20 },
        .p1 = { .min = 1, .max = 2},
        .p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
        .find_pll = intel_find_pll_g4x_dp,
};

/* Pineview limits, SDVO. */
static const intel_limit_t intel_limits_pineview_sdvo = {
        .dot = { .min = 20000, .max = 400000},
        .vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

/* Pineview limits, LVDS. */
static const intel_limit_t intel_limits_pineview_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1700000, .max = 3500000 },
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 7, .max = 112 },
        .p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_find_best_PLL,
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake limits, single-channel LVDS. */
static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake limits, dual-channel LVDS. */
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake limits, dual-channel LVDS with 100MHz refclk. */
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake limits, DisplayPort (fixed link frequencies, see find_pll). */
static const intel_limit_t intel_limits_ironlake_display_port = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000},
        .n = { .min = 1, .max = 2 },
        .m = { .min = 81, .max = 90 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 10, .max = 20 },
        .p1 = { .min = 1, .max = 2},
        .p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
        .find_pll = intel_find_pll_ironlake_dp,
};
388
 
389
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
390
						int refclk)
391
{
392
	struct drm_device *dev = crtc->dev;
393
	struct drm_i915_private *dev_priv = dev->dev_private;
394
	const intel_limit_t *limit;
395
 
396
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
397
		if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
398
		    LVDS_CLKB_POWER_UP) {
399
			/* LVDS dual channel */
400
			if (refclk == 100000)
401
				limit = &intel_limits_ironlake_dual_lvds_100m;
402
			else
403
				limit = &intel_limits_ironlake_dual_lvds;
404
		} else {
405
			if (refclk == 100000)
406
				limit = &intel_limits_ironlake_single_lvds_100m;
407
			else
408
				limit = &intel_limits_ironlake_single_lvds;
409
		}
410
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
411
			HAS_eDP)
412
		limit = &intel_limits_ironlake_display_port;
413
	else
414
		limit = &intel_limits_ironlake_dac;
415
 
416
	return limit;
417
}
418
 
419
/*
 * Pick the G4x PLL limit table for this CRTC based on the active output
 * type; falls back to the i9xx SDVO limits for anything unrecognized.
 */
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			/* LVDS with dual channel */
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			/* LVDS with single channel */
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		limit = &intel_limits_g4x_display_port;
	} else /* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;

	return limit;
}
445
 
446
static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
447
{
448
	struct drm_device *dev = crtc->dev;
449
	const intel_limit_t *limit;
450
 
451
	if (HAS_PCH_SPLIT(dev))
452
		limit = intel_ironlake_limit(crtc, refclk);
453
	else if (IS_G4X(dev)) {
454
		limit = intel_g4x_limit(crtc);
455
	} else if (IS_PINEVIEW(dev)) {
456
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
457
			limit = &intel_limits_pineview_lvds;
458
		else
459
			limit = &intel_limits_pineview_sdvo;
460
	} else if (!IS_GEN2(dev)) {
461
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
462
			limit = &intel_limits_i9xx_lvds;
463
		else
464
			limit = &intel_limits_i9xx_sdvo;
465
	} else {
466
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
467
			limit = &intel_limits_i8xx_lvds;
468
		else
469
			limit = &intel_limits_i8xx_dvo;
470
	}
471
	return limit;
472
}
473
 
474
/* m1 is reserved as 0 in Pineview, n is a ring counter */
475
static void pineview_clock(int refclk, intel_clock_t *clock)
476
{
477
	clock->m = clock->m2 + 2;
478
	clock->p = clock->p1 * clock->p2;
479
	clock->vco = refclk * clock->m / clock->n;
480
	clock->dot = clock->vco / clock->p;
481
}
482
 
483
/* Compute the derived clock values (m, p, vco, dot) from the divisors,
 * applying the hardware's +2 register bias to m1/m2/n; Pineview uses its
 * own formula (see pineview_clock). */
static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
{
	if (IS_PINEVIEW(dev)) {
		pineview_clock(refclk, clock);
		return;
	}

	clock->p = clock->p1 * clock->p2;
	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
	clock->vco = refclk * clock->m / (clock->n + 2);
	clock->dot = clock->vco / clock->p;
}
494
 
495
/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	/* Walk every encoder on the device and look for one that is both
	 * attached to this CRTC and of the requested type. */
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->base.crtc == crtc && encoder->type == type)
			return true;

	return false;
}
510
 
511
/* Bail out of intel_PLL_is_valid with 'false'; the debug message is
 * compiled out in this port. */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	/* Range-check each divisor and derived clock against the limit
	 * table; any violation rejects the whole candidate. */
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
		INTELPllInvalid("p out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");
	/* m1 must exceed m2 everywhere except Pineview, where m1 is fixed 0. */
	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
		INTELPllInvalid("m1 <= m2\n");
	if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
		INTELPllInvalid("m out of range\n");
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
545
 
546
/*
 * Exhaustively search the divisor space described by 'limit' for the
 * combination whose dot clock is closest to 'target' (kHz).  The winner is
 * written to *best_clock; returns true if any valid candidate was found.
 */
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *best_clock)

{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	/* Best error so far; starting at 'target' means any valid candidate
	 * closer than 100% error will be accepted. */
	int err = target;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (I915_READ(LVDS)) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel.  We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	/* Brute-force every (m1, m2, n, p1) combination within the limits;
	 * p2 stays fixed as chosen above. */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 is always 0 in Pineview */
			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* err only moved off 'target' if some candidate was accepted. */
	return (err != target);
}
608
 
609
/*
 * G4x/Ironlake divisor search.  Unlike intel_find_best_PLL this accepts
 * the first candidate within a relative error budget (~0.585% of target),
 * iterating in an order dictated by hardware preference: smallest n,
 * then largest m1/m2/p1.
 */
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		int lvds_reg;

		/* The LVDS control register lives in the PCH on split
		 * designs, in the CPU otherwise. */
		if (HAS_PCH_SPLIT(dev))
			lvds_reg = PCH_LVDS;
		else
			lvds_reg = LVDS;
		if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* Never accept a larger n than
						 * the best hit so far. */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
672
 
673
static bool
674
intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
675
			   int target, int refclk, intel_clock_t *best_clock)
676
{
677
	struct drm_device *dev = crtc->dev;
678
	intel_clock_t clock;
679
 
680
	if (target < 200000) {
681
		clock.n = 1;
682
		clock.p1 = 2;
683
		clock.p2 = 10;
684
		clock.m1 = 12;
685
		clock.m2 = 9;
686
	} else {
687
		clock.n = 2;
688
		clock.p1 = 1;
689
		clock.p2 = 10;
690
		clock.m1 = 14;
691
		clock.m2 = 8;
692
	}
693
	intel_clock(dev, refclk, &clock);
694
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
695
	return true;
696
}
697
 
698
/* DisplayPort has only two frequencies, 162MHz and 270MHz */
699
static bool
700
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
701
		      int target, int refclk, intel_clock_t *best_clock)
702
{
703
	intel_clock_t clock;
704
	if (target < 200000) {
705
		clock.p1 = 2;
706
		clock.p2 = 10;
707
		clock.n = 2;
708
		clock.m1 = 23;
709
		clock.m2 = 8;
710
	} else {
711
		clock.p1 = 1;
712
		clock.p2 = 10;
713
		clock.n = 1;
714
		clock.m1 = 14;
715
		clock.m2 = 2;
716
	}
717
	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
718
	clock.p = (clock.p1 * clock.p2);
719
	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
720
	clock.vco = 0;
721
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
722
	return true;
723
}
724
 
725
/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
 * mode setting code.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipestat_reg = PIPESTAT(pipe);

	/* Clear existing vblank status. Note this will clear any other
	 * sticky status fields as well.
	 *
	 * This races with i915_driver_irq_handler() with the result
	 * that either function could miss a vblank event.  Here it is not
	 * fatal, as we will either wait upon the next vblank interrupt or
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
	 * called during modeset at which time the GPU should be idle and
	 * should *not* be performing page flips and thus not waiting on
	 * vblanks...
	 * Currently, the result of us stealing a vblank from the irq
	 * handler is that a single frame will be skipped during swapbuffers.
	 */
	I915_WRITE(pipestat_reg,
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

	/* Wait for vblank interrupt bit to set, with a 50ms timeout; the
	 * timeout is logged but otherwise non-fatal. */
	if (wait_for(I915_READ(pipestat_reg) &
		     PIPE_VBLANK_INTERRUPT_STATUS,
		     50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}
760
 
761
/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(pipe);

		/* Wait for the Pipe State to go off (100ms timeout,
		 * logged but non-fatal). */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	} else {
		u32 last_line;
		int reg = PIPEDSL(pipe);
		unsigned long timeout = jiffies + msecs_to_jiffies(100);

		/* Wait for the display line to settle: poll PIPEDSL until
		 * two consecutive reads (5ms apart) agree, or 100ms pass. */
		do {
			last_line = I915_READ(reg) & DSL_LINEMASK;
			mdelay(5);
		} while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
			 time_after(timeout, jiffies));
		if (time_after(jiffies, timeout))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	}
}
804
 
805
/* Human-readable name for an enable state, used in assertion messages. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";
	return "off";
}
809
 
810
/* Only for pre-ILK configs */
/*
 * WARN (non-fatally) if the DPLL for 'pipe' is not in the expected
 * enable state, judged by the DPLL_VCO_ENABLE bit.
 */
static void assert_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)
827
 
828
/* For ILK+ */
/*
 * WARN (non-fatally) if the PCH DPLL driving 'pipe' is not in the
 * expected enable state.  On CPT the transcoder->PLL mapping is indirect
 * through PCH_DPLL_SEL, so translate the pipe number first.
 */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
			   enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32 pch_dpll;

		pch_dpll = I915_READ(PCH_DPLL_SEL);

		/* Make sure the selected PLL is enabled to the transcoder */
		WARN(!((pch_dpll >> (4 * pipe)) & 8),
		     "transcoder %d PLL not enabled\n", pipe);

		/* Convert the transcoder pipe number to a pll pipe number */
		pipe = (pch_dpll >> (4 * pipe)) & 1;
	}

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PCH PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
858
 
859
/* WARN (non-fatally) if the FDI transmitter for 'pipe' does not match
 * the expected enable state (FDI_TX_ENABLE bit). */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_TX_ENABLE);
	WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
875
 
876
/* WARN (non-fatally) if the FDI receiver for 'pipe' does not match the
 * expected enable state (FDI_RX_ENABLE bit). */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_RX_ENABLE);
	WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
892
 
893
/* WARN (non-fatally) if the FDI TX PLL for 'pipe' is disabled.  Skipped
 * on gen5, where the FDI PLL cannot be turned off. */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (dev_priv->info->gen == 5)
		return;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
907
 
908
/* WARN (non-fatally) if the FDI RX PLL for 'pipe' is disabled. */
static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
}
918
 
919
/*
 * WARN (non-fatally) if the panel-power-sequencer registers for the
 * panel driving 'pipe' are locked: regs count as locked when panel power
 * is on and the unlock key is not written.  The panel's pipe is taken
 * from the LVDS pipe-select bit.
 */
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	int pp_reg, lvds_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	/* Register locations differ between PCH-split and older designs. */
	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		pp_reg = PCH_PP_CONTROL;
		lvds_reg = PCH_LVDS;
	} else {
		pp_reg = PP_CONTROL;
		lvds_reg = LVDS;
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
		locked = false;

	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
		panel_pipe = PIPE_B;

	WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
947
 
2342 Serge 948
/* Warn unless @pipe's enable bit in PIPECONF matches the expected @state. */
void assert_pipe(struct drm_i915_private *dev_priv,
			enum pipe pipe, bool state)
{
	u32 conf;
	bool cur_state;

	conf = I915_READ(PIPECONF(pipe));
	cur_state = !!(conf & PIPECONF_ENABLE);
	WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}
962
 
963
static void assert_plane_enabled(struct drm_i915_private *dev_priv,
964
				 enum plane plane)
965
{
966
	int reg;
967
	u32 val;
968
 
969
	reg = DSPCNTR(plane);
970
	val = I915_READ(reg);
971
	WARN(!(val & DISPLAY_PLANE_ENABLE),
972
	     "plane %c assertion failure, should be active but is disabled\n",
973
	     plane_name(plane));
974
}
975
 
976
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
977
				   enum pipe pipe)
978
{
979
	int reg, i;
980
	u32 val;
981
	int cur_pipe;
982
 
983
	/* Planes are fixed to pipes on ILK+ */
984
	if (HAS_PCH_SPLIT(dev_priv->dev))
985
		return;
986
 
987
	/* Need to check both planes against the pipe */
988
	for (i = 0; i < 2; i++) {
989
		reg = DSPCNTR(i);
990
		val = I915_READ(reg);
991
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
992
			DISPPLANE_SEL_PIPE_SHIFT;
993
		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
994
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
995
		     plane_name(i), pipe_name(pipe));
996
	}
997
}
998
 
999
static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1000
{
1001
	u32 val;
1002
	bool enabled;
1003
 
1004
	val = I915_READ(PCH_DREF_CONTROL);
1005
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1006
			    DREF_SUPERSPREAD_SOURCE_MASK));
1007
	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1008
}
1009
 
1010
static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
1011
				       enum pipe pipe)
1012
{
1013
	int reg;
1014
	u32 val;
1015
	bool enabled;
1016
 
1017
	reg = TRANSCONF(pipe);
1018
	val = I915_READ(reg);
1019
	enabled = !!(val & TRANS_ENABLE);
1020
	WARN(enabled,
1021
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
1022
	     pipe_name(pipe));
1023
}
1024
 
1025
/*
 * Report whether the DP port whose control value is @val is enabled and
 * currently feeding @pipe.  On CPT the routing lives in the transcoder's
 * TRANS_DP_CTL register; on older PCHs the pipe is encoded in the port
 * register itself.
 */
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum pipe pipe, u32 port_sel, u32 val)
{
	if ((val & DP_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32 trans_dp = I915_READ(TRANS_DP_CTL(pipe));
		return (trans_dp & TRANS_DP_PORT_SEL_MASK) == port_sel;
	}

	return (val & DP_PIPE_MASK) == (pipe << 30);
}
1042
 
1043
/*
 * Report whether the HDMI port whose control value is @val is enabled and
 * feeding @pipe.  CPT routes via a transcoder-select field; older PCHs
 * encode the transcoder directly in the port register.
 */
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & PORT_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev))
		return (val & PORT_TRANS_SEL_MASK) == PORT_TRANS_SEL_CPT(pipe);

	return (val & TRANSCODER_MASK) == TRANSCODER(pipe);
}
1058
 
1059
/* Report whether the LVDS port (@val) is enabled and feeding @pipe. */
static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & LVDS_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev))
		return (val & PORT_TRANS_SEL_MASK) == PORT_TRANS_SEL_CPT(pipe);

	return (val & LVDS_PIPE_MASK) == LVDS_PIPE(pipe);
}
1074
 
1075
/* Report whether the analog (VGA/ADPA) port (@val) is enabled on @pipe. */
static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & ADPA_DAC_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev))
		return (val & PORT_TRANS_SEL_MASK) == PORT_TRANS_SEL_CPT(pipe);

	return (val & ADPA_PIPE_SELECT_MASK) == ADPA_PIPE_SELECT(pipe);
}
1089
 
1090
/* Warn if the PCH DP port at @reg is still driving transcoder @pipe. */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, int reg, u32 port_sel)
{
	u32 portval = I915_READ(reg);

	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, portval),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));
}
1098
 
1099
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1100
				     enum pipe pipe, int reg)
1101
{
1102
	u32 val = I915_READ(reg);
1103
	WARN(hdmi_pipe_enabled(dev_priv, val, pipe),
1104
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1105
	     reg, pipe_name(pipe));
1106
}
1107
 
1108
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1109
				      enum pipe pipe)
1110
{
1111
	int reg;
1112
	u32 val;
1113
 
1114
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1115
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1116
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1117
 
1118
	reg = PCH_ADPA;
1119
	val = I915_READ(reg);
1120
	WARN(adpa_pipe_enabled(dev_priv, val, pipe),
1121
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
1122
	     pipe_name(pipe));
1123
 
1124
	reg = PCH_LVDS;
1125
	val = I915_READ(reg);
1126
	WARN(lvds_pipe_enabled(dev_priv, val, pipe),
1127
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
1128
	     pipe_name(pipe));
1129
 
1130
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
1131
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
1132
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
1133
}
1134
 
1135
/**
 * intel_enable_pll - enable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
 * make sure the PLL reg is writable first though, since the panel write
 * protect mechanism may be enabled.
 *
 * Note!  This is for pre-ILK only.
 */
static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
    int reg;
    u32 val;

    /* No really, not for ILK+ */
    BUG_ON(dev_priv->info->gen >= 5);

    /* PLL is protected by panel, make sure we can write it */
    if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
        assert_panel_unlocked(dev_priv, pipe);

    /* Read-modify-write: preserve the current PLL configuration and just
     * set the VCO enable bit. */
    reg = DPLL(pipe);
    val = I915_READ(reg);
    val |= DPLL_VCO_ENABLE;

    /* We do this three times for luck: each write is flushed with a
     * posting read and followed by a 150us warmup delay. */
    I915_WRITE(reg, val);
    POSTING_READ(reg);
    udelay(150); /* wait for warmup */
    I915_WRITE(reg, val);
    POSTING_READ(reg);
    udelay(150); /* wait for warmup */
    I915_WRITE(reg, val);
    POSTING_READ(reg);
    udelay(150); /* wait for warmup */
}
1173
 
1174
/**
1175
 * intel_disable_pll - disable a PLL
1176
 * @dev_priv: i915 private structure
1177
 * @pipe: pipe PLL to disable
1178
 *
1179
 * Disable the PLL for @pipe, making sure the pipe is off first.
1180
 *
1181
 * Note!  This is for pre-ILK only.
1182
 */
1183
static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1184
{
1185
	int reg;
1186
	u32 val;
1187
 
1188
	/* Don't disable pipe A or pipe A PLLs if needed */
1189
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1190
		return;
1191
 
1192
	/* Make sure the pipe isn't still relying on us */
1193
	assert_pipe_disabled(dev_priv, pipe);
1194
 
1195
	reg = DPLL(pipe);
1196
	val = I915_READ(reg);
1197
	val &= ~DPLL_VCO_ENABLE;
1198
	I915_WRITE(reg, val);
1199
	POSTING_READ(reg);
1200
}
1201
 
1202
/**
1203
 * intel_enable_pch_pll - enable PCH PLL
1204
 * @dev_priv: i915 private structure
1205
 * @pipe: pipe PLL to enable
1206
 *
1207
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
1208
 * drives the transcoder clock.
1209
 */
1210
static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
1211
				 enum pipe pipe)
1212
{
1213
	int reg;
1214
	u32 val;
1215
 
2342 Serge 1216
	if (pipe > 1)
1217
		return;
1218
 
2327 Serge 1219
	/* PCH only available on ILK+ */
1220
	BUG_ON(dev_priv->info->gen < 5);
1221
 
1222
	/* PCH refclock must be enabled first */
1223
	assert_pch_refclk_enabled(dev_priv);
1224
 
1225
	reg = PCH_DPLL(pipe);
1226
	val = I915_READ(reg);
1227
	val |= DPLL_VCO_ENABLE;
1228
	I915_WRITE(reg, val);
1229
	POSTING_READ(reg);
1230
	udelay(200);
1231
}
1232
 
1233
/*
 * Disable the PCH PLL for @pipe, unless PCH_DPLL_SEL shows that transcoder C
 * is still clocked from this pipe's PLL, in which case it must stay on.
 */
static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	int reg;
	u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
		pll_sel = TRANSC_DPLL_ENABLE;

	/* Only pipes 0 and 1 have a PCH DPLL. */
	if (pipe > 1)
		return;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* Make sure transcoder isn't still depending on us */
	assert_transcoder_disabled(dev_priv, pipe);

	/* Build the PCH_DPLL_SEL pattern that would mean "transcoder C is
	 * enabled and sourced from this pipe's PLL". */
	if (pipe == 0)
		pll_sel |= TRANSC_DPLLA_SEL;
	else if (pipe == 1)
		pll_sel |= TRANSC_DPLLB_SEL;


	/* If transcoder C still uses this PLL, leave it running. */
	if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
		return;

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(200);
}
1265
 
1266
/*
 * Enable the PCH transcoder for @pipe.  Requires the PCH DPLL and both FDI
 * directions to already be running, since the transcoder is clocked by the
 * PLL and fed by FDI.
 */
static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	int reg;
	u32 val;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* Make sure PCH DPLL is enabled */
	assert_pch_pll_enabled(dev_priv, pipe);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);

	if (HAS_PCH_IBX(dev_priv->dev)) {
		/*
		 * make the BPC in transcoder be consistent with
		 * that in pipeconf reg.
		 */
		val &= ~PIPE_BPC_MASK;
		val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
	}
	/* Enable and wait (up to 100ms) for the hardware state bit. */
	I915_WRITE(reg, val | TRANS_ENABLE);
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %d\n", pipe);
}
1297
 
1298
static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
1299
				     enum pipe pipe)
1300
{
1301
	int reg;
1302
	u32 val;
1303
 
1304
	/* FDI relies on the transcoder */
1305
	assert_fdi_tx_disabled(dev_priv, pipe);
1306
	assert_fdi_rx_disabled(dev_priv, pipe);
1307
 
1308
	/* Ports must be off as well */
1309
	assert_pch_ports_disabled(dev_priv, pipe);
1310
 
1311
	reg = TRANSCONF(pipe);
1312
	val = I915_READ(reg);
1313
	val &= ~TRANS_ENABLE;
1314
	I915_WRITE(reg, val);
1315
	/* wait for PCH transcoder off, transcoder state */
1316
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
2342 Serge 1317
		DRM_ERROR("failed to disable transcoder %d\n", pipe);
2327 Serge 1318
}
1319
 
1320
/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @dev_priv: i915 private structure
 * @pipe: pipe to enable
 * @pch_port: on ILK+, is this pipe driving a PCH port or not
 *
 * Enable @pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 *
 * @pipe should be %PIPE_A or %PIPE_B.
 *
 * Will wait until the pipe is actually running (i.e. first vblank) before
 * returning.
 */
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
			      bool pch_port)
{
	int reg;
	u32 val;

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (!HAS_PCH_SPLIT(dev_priv->dev))
		assert_pll_enabled(dev_priv, pipe);
	else {
		if (pch_port) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv, pipe);
			assert_fdi_tx_pll_enabled(dev_priv, pipe);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Already enabled?  Then there is nothing to do. */
	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE)
		return;

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}
1364
 
1365
/**
1366
 * intel_disable_pipe - disable a pipe, asserting requirements
1367
 * @dev_priv: i915 private structure
1368
 * @pipe: pipe to disable
1369
 *
1370
 * Disable @pipe, making sure that various hardware specific requirements
1371
 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1372
 *
1373
 * @pipe should be %PIPE_A or %PIPE_B.
1374
 *
1375
 * Will wait until the pipe has shut down before returning.
1376
 */
1377
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1378
			       enum pipe pipe)
1379
{
1380
	int reg;
1381
	u32 val;
1382
 
1383
	/*
1384
	 * Make sure planes won't keep trying to pump pixels to us,
1385
	 * or we might hang the display.
1386
	 */
1387
	assert_planes_disabled(dev_priv, pipe);
1388
 
1389
	/* Don't disable pipe A or pipe A PLLs if needed */
1390
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1391
		return;
1392
 
1393
	reg = PIPECONF(pipe);
1394
	val = I915_READ(reg);
1395
	if ((val & PIPECONF_ENABLE) == 0)
1396
		return;
1397
 
1398
	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
1399
	intel_wait_for_pipe_off(dev_priv->dev, pipe);
1400
}
1401
 
1402
/*
1403
 * Plane regs are double buffered, going from enabled->disabled needs a
1404
 * trigger in order to latch.  The display address reg provides this.
1405
 */
1406
static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
1407
				      enum plane plane)
1408
{
1409
	I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1410
	I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1411
}
1412
 
1413
/**
1414
 * intel_enable_plane - enable a display plane on a given pipe
1415
 * @dev_priv: i915 private structure
1416
 * @plane: plane to enable
1417
 * @pipe: pipe being fed
1418
 *
1419
 * Enable @plane on @pipe, making sure that @pipe is running first.
1420
 */
1421
static void intel_enable_plane(struct drm_i915_private *dev_priv,
1422
			       enum plane plane, enum pipe pipe)
1423
{
1424
	int reg;
1425
	u32 val;
1426
 
1427
	/* If the pipe isn't enabled, we can't pump pixels and may hang */
1428
	assert_pipe_enabled(dev_priv, pipe);
1429
 
1430
	reg = DSPCNTR(plane);
1431
	val = I915_READ(reg);
1432
	if (val & DISPLAY_PLANE_ENABLE)
1433
		return;
1434
 
1435
	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
1436
	intel_flush_display_plane(dev_priv, plane);
1437
	intel_wait_for_vblank(dev_priv->dev, pipe);
1438
}
1439
 
1440
/**
1441
 * intel_disable_plane - disable a display plane
1442
 * @dev_priv: i915 private structure
1443
 * @plane: plane to disable
1444
 * @pipe: pipe consuming the data
1445
 *
1446
 * Disable @plane; should be an independent operation.
1447
 */
1448
static void intel_disable_plane(struct drm_i915_private *dev_priv,
1449
				enum plane plane, enum pipe pipe)
1450
{
1451
	int reg;
1452
	u32 val;
1453
 
1454
	reg = DSPCNTR(plane);
1455
	val = I915_READ(reg);
1456
	if ((val & DISPLAY_PLANE_ENABLE) == 0)
1457
		return;
1458
 
1459
	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
1460
	intel_flush_display_plane(dev_priv, plane);
1461
	intel_wait_for_vblank(dev_priv->dev, pipe);
1462
}
1463
 
1464
/* Turn off the PCH DP port at @reg if it is currently driving @pipe. */
static void disable_pch_dp(struct drm_i915_private *dev_priv,
			   enum pipe pipe, int reg, u32 port_sel)
{
	u32 portval = I915_READ(reg);

	if (dp_pipe_enabled(dev_priv, pipe, port_sel, portval)) {
		DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
		I915_WRITE(reg, portval & ~DP_PORT_EN);
	}
}
1473
 
1474
static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1475
			     enum pipe pipe, int reg)
1476
{
1477
	u32 val = I915_READ(reg);
1478
	if (hdmi_pipe_enabled(dev_priv, val, pipe)) {
1479
		DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
1480
			      reg, pipe);
1481
		I915_WRITE(reg, val & ~PORT_ENABLE);
1482
	}
1483
}
1484
 
1485
/* Disable any ports connected to this transcoder */
1486
static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1487
				    enum pipe pipe)
1488
{
1489
	u32 reg, val;
1490
 
1491
	val = I915_READ(PCH_PP_CONTROL);
1492
	I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
1493
 
1494
	disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1495
	disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1496
	disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1497
 
1498
	reg = PCH_ADPA;
1499
	val = I915_READ(reg);
1500
	if (adpa_pipe_enabled(dev_priv, val, pipe))
1501
		I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
1502
 
1503
	reg = PCH_LVDS;
1504
	val = I915_READ(reg);
1505
	if (lvds_pipe_enabled(dev_priv, val, pipe)) {
1506
		DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
1507
		I915_WRITE(reg, val & ~LVDS_PORT_EN);
1508
		POSTING_READ(reg);
1509
		udelay(100);
1510
	}
1511
 
1512
	disable_pch_hdmi(dev_priv, pipe, HDMIB);
1513
	disable_pch_hdmi(dev_priv, pipe, HDMIC);
1514
	disable_pch_hdmi(dev_priv, pipe, HDMID);
1515
}
1516
 
1517
static void i8xx_disable_fbc(struct drm_device *dev)
1518
{
1519
    struct drm_i915_private *dev_priv = dev->dev_private;
1520
    u32 fbc_ctl;
1521
 
1522
    /* Disable compression */
1523
    fbc_ctl = I915_READ(FBC_CONTROL);
1524
    if ((fbc_ctl & FBC_CTL_EN) == 0)
1525
        return;
1526
 
1527
    fbc_ctl &= ~FBC_CTL_EN;
1528
    I915_WRITE(FBC_CONTROL, fbc_ctl);
1529
 
1530
    /* Wait for compressing bit to clear */
1531
    if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
1532
        DRM_DEBUG_KMS("FBC idle timed out\n");
1533
        return;
1534
    }
1535
 
1536
    DRM_DEBUG_KMS("disabled FBC\n");
1537
}
1538
 
1539
/*
 * Program and enable 8xx-style framebuffer compression for @crtc's plane.
 * @interval: periodic-compression interval written to FBC_CONTROL.
 */
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct drm_framebuffer *fb = crtc->fb;
    struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
    struct drm_i915_gem_object *obj = intel_fb->obj;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int cfb_pitch;
    int plane, i;
    u32 fbc_ctl, fbc_ctl2;

    /* Pitch is limited by both the compressed-buffer size and the fb. */
    cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

    /* FBC_CTL wants 64B units */
    cfb_pitch = (cfb_pitch / 64) - 1;
    plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

    /* Clear old tags */
    for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
        I915_WRITE(FBC_TAG + (i * 4), 0);

    /* Set it up... */
    fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
    fbc_ctl2 |= plane;
    I915_WRITE(FBC_CONTROL2, fbc_ctl2);
    I915_WRITE(FBC_FENCE_OFF, crtc->y);

    /* enable it... */
    fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
    if (IS_I945GM(dev))
        fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
    fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
    fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
    fbc_ctl |= obj->fence_reg;
    I915_WRITE(FBC_CONTROL, fbc_ctl);

    DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
              cfb_pitch, crtc->y, intel_crtc->plane);
}
1581
 
1582
static bool i8xx_fbc_enabled(struct drm_device *dev)
1583
{
1584
    struct drm_i915_private *dev_priv = dev->dev_private;
1585
 
1586
    return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
1587
}
1588
 
1589
/*
 * Program and enable G4x-style (DPFC) framebuffer compression for @crtc.
 * @interval: recompression timer count.
 */
static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct drm_framebuffer *fb = crtc->fb;
    struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
    struct drm_i915_gem_object *obj = intel_fb->obj;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
    unsigned long stall_watermark = 200;
    u32 dpfc_ctl;

    /* Compression control: plane select, self-refresh, 1x limit, fence. */
    dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
    dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
    I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);

    I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
           (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
           (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
    I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

    /* enable it... */
    I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);

    DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
1615
 
1616
static void g4x_disable_fbc(struct drm_device *dev)
1617
{
1618
    struct drm_i915_private *dev_priv = dev->dev_private;
1619
    u32 dpfc_ctl;
1620
 
1621
    /* Disable compression */
1622
    dpfc_ctl = I915_READ(DPFC_CONTROL);
1623
    if (dpfc_ctl & DPFC_CTL_EN) {
1624
        dpfc_ctl &= ~DPFC_CTL_EN;
1625
        I915_WRITE(DPFC_CONTROL, dpfc_ctl);
1626
 
1627
        DRM_DEBUG_KMS("disabled FBC\n");
1628
    }
1629
}
1630
 
1631
static bool g4x_fbc_enabled(struct drm_device *dev)
1632
{
1633
    struct drm_i915_private *dev_priv = dev->dev_private;
1634
 
1635
    return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
1636
}
1637
 
1638
/*
 * Poke the GEN6 blitter's ECOSKPD register so the blitter notifies FBC of
 * front-buffer writes.  The register is key-protected: the bit is first
 * written in the LOCK_SHIFT (key) position, then set, then the key bits
 * are cleared again -- presumably the documented unlock/lock dance; the
 * exact write ordering is hardware-mandated, do not reorder.
 */
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */
	gen6_gt_force_wake_get(dev_priv);
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
	gen6_gt_force_wake_put(dev_priv);
}
1657
 
1658
/*
 * Program and enable Ironlake-style (ILK_DPFC) framebuffer compression for
 * @crtc; on GEN6 also sets up the CPU fence and blitter FBC notification.
 * @interval: recompression timer count.
 */
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct drm_framebuffer *fb = crtc->fb;
    struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
    struct drm_i915_gem_object *obj = intel_fb->obj;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
    unsigned long stall_watermark = 200;
    u32 dpfc_ctl;

    /* Keep the reserved bits, rebuild the rest of the control word. */
    dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
    dpfc_ctl &= DPFC_RESERVED;
    dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
    /* Set persistent mode for front-buffer rendering, ala X. */
    dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
    dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
    I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);

    I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
           (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
           (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
    I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
    I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
    /* enable it... */
    I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

    if (IS_GEN6(dev)) {
        I915_WRITE(SNB_DPFC_CTL_SA,
               SNB_CPU_FENCE_ENABLE | obj->fence_reg);
        I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
        sandybridge_blit_fbc_update(dev);
    }

    DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
1695
 
1696
static void ironlake_disable_fbc(struct drm_device *dev)
1697
{
1698
    struct drm_i915_private *dev_priv = dev->dev_private;
1699
    u32 dpfc_ctl;
1700
 
1701
    /* Disable compression */
1702
    dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1703
    if (dpfc_ctl & DPFC_CTL_EN) {
1704
        dpfc_ctl &= ~DPFC_CTL_EN;
1705
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
1706
 
1707
        DRM_DEBUG_KMS("disabled FBC\n");
1708
    }
1709
}
1710
 
1711
static bool ironlake_fbc_enabled(struct drm_device *dev)
1712
{
1713
    struct drm_i915_private *dev_priv = dev->dev_private;
1714
 
1715
    return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
1716
}
1717
 
1718
bool intel_fbc_enabled(struct drm_device *dev)
1719
{
1720
	struct drm_i915_private *dev_priv = dev->dev_private;
1721
 
1722
	if (!dev_priv->display.fbc_enabled)
1723
		return false;
1724
 
1725
	return dev_priv->display.fbc_enabled(dev);
1726
}
1727
 
1728
 
1729
 
1730
 
1731
 
1732
 
1733
 
1734
 
1735
 
1736
 
1737
/*
 * Request FBC be enabled for @crtc.  In the upstream driver this schedules
 * a delayed work item; in this port the work-queue path is not implemented
 * yet (see commented-out code), so currently nothing is actually enabled.
 */
static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

//	intel_cancel_fbc_work(dev_priv);

//	work = kzalloc(sizeof *work, GFP_KERNEL);
//	if (work == NULL) {
//		dev_priv->display.enable_fbc(crtc, interval);
//		return;
//	}

//	work->crtc = crtc;
//	work->fb = crtc->fb;
//	work->interval = interval;
//	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

//	dev_priv->fbc_work = work;

	DRM_DEBUG_KMS("scheduling delayed FBC enable\n");

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 */
//	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
1776
 
1777
void intel_disable_fbc(struct drm_device *dev)
1778
{
1779
	struct drm_i915_private *dev_priv = dev->dev_private;
1780
 
1781
//   intel_cancel_fbc_work(dev_priv);
1782
 
1783
	if (!dev_priv->display.disable_fbc)
1784
		return;
1785
 
1786
	dev_priv->display.disable_fbc(dev);
1787
	dev_priv->cfb_plane = -1;
1788
}
1789
 
1790
/**
1791
 * intel_update_fbc - enable/disable FBC as needed
1792
 * @dev: the drm_device
1793
 *
1794
 * Set up the framebuffer compression hardware at mode set time.  We
1795
 * enable it if possible:
1796
 *   - plane A only (on pre-965)
1797
 *   - no pixel mulitply/line duplication
1798
 *   - no alpha buffer discard
1799
 *   - no dual wide
1800
 *   - framebuffer <= 2048 in width, 1536 in height
1801
 *
1802
 * We can't assume that any compression will take place (worst case),
1803
 * so the compressed buffer has to be the same size as the uncompressed
1804
 * one.  It also must reside (along with the line length buffer) in
1805
 * stolen memory.
1806
 *
1807
 * We need to enable/disable FBC on a global basis.
1808
 */
1809
static void intel_update_fbc(struct drm_device *dev)
1810
{
1811
	struct drm_i915_private *dev_priv = dev->dev_private;
1812
	struct drm_crtc *crtc = NULL, *tmp_crtc;
1813
	struct intel_crtc *intel_crtc;
1814
	struct drm_framebuffer *fb;
1815
	struct intel_framebuffer *intel_fb;
1816
	struct drm_i915_gem_object *obj;
2342 Serge 1817
	int enable_fbc;
2327 Serge 1818
 
1819
	DRM_DEBUG_KMS("\n");
1820
 
1821
	if (!i915_powersave)
1822
		return;
1823
 
1824
	if (!I915_HAS_FBC(dev))
1825
		return;
1826
 
1827
	/*
1828
	 * If FBC is already on, we just have to verify that we can
1829
	 * keep it that way...
1830
	 * Need to disable if:
1831
	 *   - more than one pipe is active
1832
	 *   - changing FBC params (stride, fence, mode)
1833
	 *   - new fb is too large to fit in compressed buffer
1834
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
1835
	 */
1836
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
1837
		if (tmp_crtc->enabled && tmp_crtc->fb) {
1838
			if (crtc) {
1839
				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
2336 Serge 1840
                dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
2327 Serge 1841
				goto out_disable;
1842
			}
1843
			crtc = tmp_crtc;
1844
		}
1845
	}
1846
 
1847
	if (!crtc || crtc->fb == NULL) {
1848
		DRM_DEBUG_KMS("no output, disabling\n");
2336 Serge 1849
        dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
2327 Serge 1850
		goto out_disable;
1851
	}
1852
 
1853
	intel_crtc = to_intel_crtc(crtc);
1854
	fb = crtc->fb;
1855
	intel_fb = to_intel_framebuffer(fb);
1856
	obj = intel_fb->obj;
1857
 
2342 Serge 1858
	enable_fbc = i915_enable_fbc;
1859
	if (enable_fbc < 0) {
1860
		DRM_DEBUG_KMS("fbc set to per-chip default\n");
1861
		enable_fbc = 1;
2360 Serge 1862
		if (INTEL_INFO(dev)->gen <= 6)
2342 Serge 1863
			enable_fbc = 0;
1864
	}
1865
	if (!enable_fbc) {
1866
		DRM_DEBUG_KMS("fbc disabled per module param\n");
2336 Serge 1867
        dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
2327 Serge 1868
		goto out_disable;
1869
	}
1870
	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
1871
		DRM_DEBUG_KMS("framebuffer too large, disabling "
1872
			      "compression\n");
2336 Serge 1873
        dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
2327 Serge 1874
		goto out_disable;
1875
	}
1876
	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
1877
	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
1878
		DRM_DEBUG_KMS("mode incompatible with compression, "
1879
			      "disabling\n");
2336 Serge 1880
        dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
2327 Serge 1881
		goto out_disable;
1882
	}
1883
	if ((crtc->mode.hdisplay > 2048) ||
1884
	    (crtc->mode.vdisplay > 1536)) {
1885
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
2336 Serge 1886
        dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
2327 Serge 1887
		goto out_disable;
1888
	}
1889
	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
1890
		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
2336 Serge 1891
        dev_priv->no_fbc_reason = FBC_BAD_PLANE;
2327 Serge 1892
		goto out_disable;
1893
	}
1894
 
1895
	/* The use of a CPU fence is mandatory in order to detect writes
1896
	 * by the CPU to the scanout and trigger updates to the FBC.
1897
	 */
1898
//	if (obj->tiling_mode != I915_TILING_X ||
1899
//	    obj->fence_reg == I915_FENCE_REG_NONE) {
1900
//		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
1901
//		dev_priv->no_fbc_reason = FBC_NOT_TILED;
1902
//		goto out_disable;
1903
//	}
1904
 
1905
	/* If the kernel debugger is active, always disable compression */
1906
	if (in_dbg_master())
1907
		goto out_disable;
1908
 
1909
	/* If the scanout has not changed, don't modify the FBC settings.
1910
	 * Note that we make the fundamental assumption that the fb->obj
1911
	 * cannot be unpinned (and have its GTT offset and fence revoked)
1912
	 * without first being decoupled from the scanout and FBC disabled.
1913
	 */
1914
	if (dev_priv->cfb_plane == intel_crtc->plane &&
1915
	    dev_priv->cfb_fb == fb->base.id &&
1916
	    dev_priv->cfb_y == crtc->y)
1917
		return;
1918
 
1919
	if (intel_fbc_enabled(dev)) {
1920
		/* We update FBC along two paths, after changing fb/crtc
1921
		 * configuration (modeswitching) and after page-flipping
1922
		 * finishes. For the latter, we know that not only did
1923
		 * we disable the FBC at the start of the page-flip
1924
		 * sequence, but also more than one vblank has passed.
1925
		 *
1926
		 * For the former case of modeswitching, it is possible
1927
		 * to switch between two FBC valid configurations
1928
		 * instantaneously so we do need to disable the FBC
1929
		 * before we can modify its control registers. We also
1930
		 * have to wait for the next vblank for that to take
1931
		 * effect. However, since we delay enabling FBC we can
1932
		 * assume that a vblank has passed since disabling and
1933
		 * that we can safely alter the registers in the deferred
1934
		 * callback.
1935
		 *
1936
		 * In the scenario that we go from a valid to invalid
1937
		 * and then back to valid FBC configuration we have
1938
		 * no strict enforcement that a vblank occurred since
1939
		 * disabling the FBC. However, along all current pipe
1940
		 * disabling paths we do need to wait for a vblank at
1941
		 * some point. And we wait before enabling FBC anyway.
1942
		 */
1943
		DRM_DEBUG_KMS("disabling active FBC for update\n");
1944
		intel_disable_fbc(dev);
1945
	}
1946
 
1947
	intel_enable_fbc(crtc, 500);
1948
	return;
1949
 
1950
out_disable:
1951
	/* Multiple disables should be harmless */
1952
	if (intel_fbc_enabled(dev)) {
1953
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
1954
		intel_disable_fbc(dev);
1955
	}
1956
}
1957
 
2335 Serge 1958
/*
 * Pin a GEM object for scan-out and (in the original driver) install a fence.
 *
 * Chooses the surface alignment required by the display engine based on the
 * object's tiling mode, then pins it into the GTT for use as a display plane.
 * dev_priv->mm.interruptible is cleared around the pin so the operation cannot
 * be interrupted by signals while modesetting.
 *
 * @dev:       DRM device
 * @obj:       GEM object backing the framebuffer
 * @pipelined: ring to pipeline the pin against (may be NULL)
 *
 * Returns 0 on success or a negative errno (-EINVAL for Y-tiled scan-out).
 */
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct intel_ring_buffer *pipelined)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 alignment;
	int ret;

	switch (obj->tiling_mode) {
	case I915_TILING_NONE:
		/* Linear surfaces: alignment requirement varies by generation. */
		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
			alignment = 128 * 1024;
		else if (INTEL_INFO(dev)->gen >= 4)
			alignment = 4 * 1024;
		else
			alignment = 64 * 1024;
		break;
	case I915_TILING_X:
		/* pin() will align the object as required by fence */
		alignment = 0;
		break;
	case I915_TILING_Y:
		/* FIXME: Is this true? */
		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
		return -EINVAL;
	default:
		BUG();
	}

	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
	if (ret)
		goto err_interruptible;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.  For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
//	if (obj->tiling_mode != I915_TILING_NONE) {
//		ret = i915_gem_object_get_fence(obj, pipelined);
//		if (ret)
//			goto err_unpin;
//	}

	dev_priv->mm.interruptible = true;
	return 0;

/* NOTE(review): err_unpin is currently unreachable — the fencing path that
 * jumped here is commented out in this port. Kept for when it is restored. */
err_unpin:
	i915_gem_object_unpin(obj);
err_interruptible:
	dev_priv->mm.interruptible = true;
	return ret;
}
2327 Serge 2013
 
2014
/*
 * Program the display plane registers for pre-Ironlake (i9xx) hardware.
 *
 * Configures DSPCNTR pixel format from fb->bits_per_pixel/depth, sets the
 * tiling bit on gen4+, then writes the stride and base/offset registers.
 * The register write order (control, stride, surface/offset) follows the
 * hardware programming sequence; POSTING_READ flushes the writes.
 *
 * @crtc: CRTC whose plane is updated
 * @fb:   framebuffer to scan out
 * @x,@y: panning offset within the framebuffer
 *
 * Returns 0 on success, -EINVAL for an unsupported plane or pixel format.
 */
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                 int x, int y)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    struct intel_framebuffer *intel_fb;
    struct drm_i915_gem_object *obj;
    int plane = intel_crtc->plane;
    unsigned long Start, Offset;
    u32 dspcntr;
    u32 reg;

    /* i9xx hardware only has planes A and B. */
    switch (plane) {
    case 0:
    case 1:
        break;
    default:
        DRM_ERROR("Can't update plane %d in SAREA\n", plane);
        return -EINVAL;
    }

    intel_fb = to_intel_framebuffer(fb);
    obj = intel_fb->obj;

    reg = DSPCNTR(plane);
    dspcntr = I915_READ(reg);
    /* Mask out pixel format bits in case we change it */
    dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
    switch (fb->bits_per_pixel) {
    case 8:
        dspcntr |= DISPPLANE_8BPP;
        break;
    case 16:
        if (fb->depth == 15)
            dspcntr |= DISPPLANE_15_16BPP;
        else
            dspcntr |= DISPPLANE_16BPP;
        break;
    case 24:
    case 32:
        dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
        break;
    default:
        DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
        return -EINVAL;
    }
    /* Tiled scan-out is only expressible in DSPCNTR on gen4+. */
    if (INTEL_INFO(dev)->gen >= 4) {
        if (obj->tiling_mode != I915_TILING_NONE)
            dspcntr |= DISPPLANE_TILED;
        else
            dspcntr &= ~DISPPLANE_TILED;
    }

    I915_WRITE(reg, dspcntr);

    Start = obj->gtt_offset;
	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);

    DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      Start, Offset, x, y, fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
    if (INTEL_INFO(dev)->gen >= 4) {
        /* gen4+: base in DSPSURF, pan offset in DSPTILEOFF/DSPADDR. */
        I915_WRITE(DSPSURF(plane), Start);
        I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
        I915_WRITE(DSPADDR(plane), Offset);
    } else
        /* older hardware takes a single linear address. */
        I915_WRITE(DSPADDR(plane), Start + Offset);
    POSTING_READ(reg);

    return 0;
}
2086
 
2087
/*
 * Program the display plane registers for Ironlake and newer PCH platforms.
 *
 * Like i9xx_update_plane() but supports plane C (Ivybridge), is stricter
 * about depth/bpp combinations, and must set the trickle-feed-disable bit.
 * Tiling support is commented out in this port, so DISPPLANE_TILED is
 * always cleared.
 *
 * @crtc: CRTC whose plane is updated
 * @fb:   framebuffer to scan out
 * @x,@y: panning offset within the framebuffer
 *
 * Returns 0 on success, -EINVAL for an unsupported plane or format.
 */
static int ironlake_update_plane(struct drm_crtc *crtc,
                 struct drm_framebuffer *fb, int x, int y)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    struct intel_framebuffer *intel_fb;
    struct drm_i915_gem_object *obj;
    int plane = intel_crtc->plane;
    unsigned long Start, Offset;
    u32 dspcntr;
    u32 reg;

    switch (plane) {
    case 0:
    case 1:
	case 2:
        break;
    default:
        DRM_ERROR("Can't update plane %d in SAREA\n", plane);
        return -EINVAL;
    }

    intel_fb = to_intel_framebuffer(fb);
    obj = intel_fb->obj;

    reg = DSPCNTR(plane);
    dspcntr = I915_READ(reg);
    /* Mask out pixel format bits in case we change it */
    dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
    switch (fb->bits_per_pixel) {
    case 8:
        dspcntr |= DISPPLANE_8BPP;
        break;
    case 16:
        /* 15-bit (x1r5g5b5) is rejected on this hardware path. */
        if (fb->depth != 16)
            return -EINVAL;

        dspcntr |= DISPPLANE_16BPP;
        break;
    case 24:
    case 32:
        if (fb->depth == 24)
            dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
        else if (fb->depth == 30)
            dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
        else
            return -EINVAL;
        break;
    default:
        DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
        return -EINVAL;
    }

    /* Tiled scan-out disabled in this port: always program linear. */
//    if (obj->tiling_mode != I915_TILING_NONE)
//        dspcntr |= DISPPLANE_TILED;
//    else
        dspcntr &= ~DISPPLANE_TILED;

    /* must disable */
    dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

    I915_WRITE(reg, dspcntr);

    Start = obj->gtt_offset;
	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);

    DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      Start, Offset, x, y, fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane), Start);
	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
	I915_WRITE(DSPADDR(plane), Offset);
	POSTING_READ(reg);

    return 0;
}
2164
 
2165
/* Assume fb object is pinned & idle & fenced and just update base pointers */
2166
static int
2167
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2168
			   int x, int y, enum mode_set_atomic state)
2169
{
2170
	struct drm_device *dev = crtc->dev;
2171
	struct drm_i915_private *dev_priv = dev->dev_private;
2172
	int ret;
2173
 
2174
	ret = dev_priv->display.update_plane(crtc, fb, x, y);
2175
	if (ret)
2176
		return ret;
2177
 
2178
	intel_update_fbc(dev);
2179
	intel_increase_pllclock(crtc);
2180
 
2181
	return 0;
2182
}
2183
 
2184
/*
 * CRTC set-base entry point: repoint the pipe at crtc->fb with pan (x, y).
 *
 * Validates that a framebuffer is bound and that the CRTC's plane exists on
 * this hardware (plane 2 only on Ivybridge), then performs the atomic base
 * update under struct_mutex. On failure the fb object is unpinned before the
 * mutex is dropped.
 *
 * ENTER()/LEAVE() are KolibriOS trace macros.
 * NOTE(review): the early "No FB bound" return exits after ENTER() without a
 * matching LEAVE() — confirm whether the trace macros require pairing.
 *
 * Returns 0 on success or a negative errno.
 */
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		    struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_master_private *master_priv;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int ret;

    ENTER();

	/* no fb bound */
	if (!crtc->fb) {
		DRM_ERROR("No FB bound\n");
		return 0;
	}

	switch (intel_crtc->plane) {
	case 0:
	case 1:
		break;
	case 2:
		if (IS_IVYBRIDGE(dev))
			break;
		/* fall through otherwise */
	default:
		DRM_ERROR("no plane for crtc\n");
		return -EINVAL;
	}

	mutex_lock(&dev->struct_mutex);

    ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
					 LEAVE_ATOMIC_MODE_SET);
	if (ret) {
		/* Drop the display pin taken for this fb before unlocking. */
		i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
		mutex_unlock(&dev->struct_mutex);
		DRM_ERROR("failed to update base address\n");
        LEAVE();
		return ret;
	}

	mutex_unlock(&dev->struct_mutex);


    LEAVE();
    return 0;


}
2234
 
2235
/*
 * Program the eDP PLL (DP_A) frequency for the requested link clock.
 *
 * Clocks below 200 MHz select the 160 MHz PLL and apply the documented
 * four-step 160 MHz workaround; otherwise the 270 MHz PLL is used.
 * The final POSTING_READ + udelay(500) lets the PLL settle.
 *
 * @crtc:  CRTC driving the eDP panel
 * @clock: requested link clock in kHz
 */
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (clock < 200000) {
		u32 temp;
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		/* workaround for 160Mhz:
		   1) program 0x4600c bits 15:0 = 0x8124
		   2) program 0x46010 bit 0 = 1
		   3) program 0x46034 bit 24 = 1
		   4) program 0x64000 bit 14 = 1
		   */
		temp = I915_READ(0x4600c);
		temp &= 0xffff0000;
		I915_WRITE(0x4600c, temp | 0x8124);

		temp = I915_READ(0x46010);
		I915_WRITE(0x46010, temp | 1);

		temp = I915_READ(0x46034);
		I915_WRITE(0x46034, temp | (1 << 24));
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
	}
	I915_WRITE(DP_A, dpa_ctl);

	/* Flush the write and wait for the PLL to settle. */
	POSTING_READ(DP_A);
	udelay(500);
}
2271
 
2272
/*
 * Switch the FDI TX/RX link from training patterns to normal operation.
 *
 * Called after link training has completed. Ivybridge uses different
 * train-select bits (FDI_LINK_TRAIN_NONE_IVB) on the TX side, and CPT PCHs
 * use their own pattern field on the RX side. Enhanced framing is enabled
 * on both ends, and IVB additionally wants error correction on.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
2312
 
2313
static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
2314
{
2315
	struct drm_i915_private *dev_priv = dev->dev_private;
2316
	u32 flags = I915_READ(SOUTH_CHICKEN1);
2317
 
2318
	flags |= FDI_PHASE_SYNC_OVR(pipe);
2319
	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
2320
	flags |= FDI_PHASE_SYNC_EN(pipe);
2321
	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
2322
	POSTING_READ(SOUTH_CHICKEN1);
2323
}
2324
 
2325
/* The FDI link training functions for ILK/Ibexpeak. */
2326
/* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Train the CPU-to-PCH FDI link on Ironlake.
 *
 * Sequence: unmask the RX lock bits in the IMR so training results are
 * visible, enable TX/RX with training pattern 1, poll FDI_RX_IIR for bit
 * lock, switch both ends to pattern 2, and poll for symbol lock. Each
 * stage polls up to 5 times; failures are logged but not fatal.
 */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;
    u32 reg, temp, tries;

    /* FDI needs bits from pipe & plane first */
    assert_pipe_enabled(dev_priv, pipe);
    assert_plane_enabled(dev_priv, plane);

    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
       for train result */
    reg = FDI_RX_IMR(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_RX_SYMBOL_LOCK;
    temp &= ~FDI_RX_BIT_LOCK;
    I915_WRITE(reg, temp);
    I915_READ(reg);
    udelay(150);

    /* enable CPU FDI TX and PCH FDI RX */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    /* bits 21:19 = FDI lane count minus one */
    temp &= ~(7 << 19);
    temp |= (intel_crtc->fdi_lanes - 1) << 19;
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_1;
    I915_WRITE(reg, temp | FDI_TX_ENABLE);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_1;
    I915_WRITE(reg, temp | FDI_RX_ENABLE);

    POSTING_READ(reg);
    udelay(150);

    /* Ironlake workaround, enable clock pointer after FDI enable*/
    if (HAS_PCH_IBX(dev)) {
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
               FDI_RX_PHASE_SYNC_POINTER_EN);
    }

    /* Poll for bit lock (training stage 1 complete). */
    reg = FDI_RX_IIR(pipe);
    for (tries = 0; tries < 5; tries++) {
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if ((temp & FDI_RX_BIT_LOCK)) {
            DRM_DEBUG_KMS("FDI train 1 done.\n");
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
            break;
        }
    }
    if (tries == 5)
        DRM_ERROR("FDI train 1 fail!\n");

    /* Train 2 */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_2;
    I915_WRITE(reg, temp);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_2;
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    udelay(150);

    /* Poll for symbol lock (training stage 2 complete). */
    reg = FDI_RX_IIR(pipe);
    for (tries = 0; tries < 5; tries++) {
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if (temp & FDI_RX_SYMBOL_LOCK) {
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
            DRM_DEBUG_KMS("FDI train 2 done.\n");
            break;
        }
    }
    if (tries == 5)
        DRM_ERROR("FDI train 2 fail!\n");

    DRM_DEBUG_KMS("FDI train done\n");

}
2421
 
2342 Serge 2422
/* SNB-B voltage-swing / pre-emphasis values tried in order during FDI
 * link training (one entry per retry of each training stage). */
static const int snb_b_fdi_train_param[] = {
    FDI_LINK_TRAIN_400MV_0DB_SNB_B,
    FDI_LINK_TRAIN_400MV_6DB_SNB_B,
    FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
    FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
2428
 
2429
/* The FDI link training functions for SNB/Cougarpoint. */
2430
/* The FDI link training functions for SNB/Cougarpoint. */
/*
 * Train the FDI link on Sandybridge.
 *
 * Same two-stage pattern-1/pattern-2 handshake as Ironlake, but each stage
 * retries with escalating voltage-swing/pre-emphasis values from
 * snb_b_fdi_train_param[], and CPT PCHs use their own RX pattern field.
 */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    u32 reg, temp, i;

    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
       for train result */
    reg = FDI_RX_IMR(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_RX_SYMBOL_LOCK;
    temp &= ~FDI_RX_BIT_LOCK;
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    udelay(150);

    /* enable CPU FDI TX and PCH FDI RX */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    /* bits 21:19 = FDI lane count minus one */
    temp &= ~(7 << 19);
    temp |= (intel_crtc->fdi_lanes - 1) << 19;
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_1;
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
    /* SNB-B */
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
    I915_WRITE(reg, temp | FDI_TX_ENABLE);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    if (HAS_PCH_CPT(dev)) {
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
        temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
    } else {
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_1;
    }
    I915_WRITE(reg, temp | FDI_RX_ENABLE);

    POSTING_READ(reg);
    udelay(150);

    if (HAS_PCH_CPT(dev))
        cpt_phase_pointer_enable(dev, pipe);

    /* Retry stage 1 with each voltage/pre-emphasis combination. */
	for (i = 0; i < 4; i++) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(500);

        reg = FDI_RX_IIR(pipe);
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if (temp & FDI_RX_BIT_LOCK) {
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
            DRM_DEBUG_KMS("FDI train 1 done.\n");
            break;
        }
    }
    if (i == 4)
        DRM_ERROR("FDI train 1 fail!\n");

    /* Train 2 */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_2;
    if (IS_GEN6(dev)) {
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        /* SNB-B */
        temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
    }
    I915_WRITE(reg, temp);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    if (HAS_PCH_CPT(dev)) {
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
        temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
    } else {
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_2;
    }
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    udelay(150);

    /* Retry stage 2 with each voltage/pre-emphasis combination. */
	for (i = 0; i < 4; i++) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(500);

        reg = FDI_RX_IIR(pipe);
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if (temp & FDI_RX_SYMBOL_LOCK) {
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
            DRM_DEBUG_KMS("FDI train 2 done.\n");
            break;
        }
    }
    if (i == 4)
        DRM_ERROR("FDI train 2 fail!\n");

    DRM_DEBUG_KMS("FDI train done.\n");
}
2552
 
2553
/* Manual link training for Ivy Bridge A0 parts */
2554
/* Manual link training for Ivy Bridge A0 parts */
/*
 * Train the FDI link manually on Ivybridge A0 silicon.
 *
 * IVB uses its own pattern-select bits (FDI_LINK_TRAIN_*_IVB on TX, CPT
 * fields on RX), requires FDI_COMPOSITE_SYNC on both ends, and disables
 * auto-training. Otherwise the flow matches gen6: pattern 1 with per-retry
 * voltage escalation until bit lock, then pattern 2 until symbol lock.
 */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    u32 reg, temp, i;

    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
       for train result */
    reg = FDI_RX_IMR(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_RX_SYMBOL_LOCK;
    temp &= ~FDI_RX_BIT_LOCK;
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    udelay(150);

    /* enable CPU FDI TX and PCH FDI RX */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    /* bits 21:19 = FDI lane count minus one */
    temp &= ~(7 << 19);
    temp |= (intel_crtc->fdi_lanes - 1) << 19;
    temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
    temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	temp |= FDI_COMPOSITE_SYNC;
    I915_WRITE(reg, temp | FDI_TX_ENABLE);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_AUTO;
    temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
    temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	temp |= FDI_COMPOSITE_SYNC;
    I915_WRITE(reg, temp | FDI_RX_ENABLE);

    POSTING_READ(reg);
    udelay(150);

    if (HAS_PCH_CPT(dev))
        cpt_phase_pointer_enable(dev, pipe);

    /* Retry stage 1 with each voltage/pre-emphasis combination. */
	for (i = 0; i < 4; i++) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(500);

        reg = FDI_RX_IIR(pipe);
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        /* re-read IIR in case bit lock arrived just after the first read */
        if (temp & FDI_RX_BIT_LOCK ||
            (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
            DRM_DEBUG_KMS("FDI train 1 done.\n");
            break;
        }
    }
    if (i == 4)
        DRM_ERROR("FDI train 1 fail!\n");

    /* Train 2 */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE_IVB;
    temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
    I915_WRITE(reg, temp);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
    temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    udelay(150);

    /* Retry stage 2 with each voltage/pre-emphasis combination. */
	for (i = 0; i < 4; i++) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        udelay(500);

        reg = FDI_RX_IIR(pipe);
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if (temp & FDI_RX_SYMBOL_LOCK) {
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
            DRM_DEBUG_KMS("FDI train 2 done.\n");
            break;
        }
    }
    if (i == 4)
        DRM_ERROR("FDI train 2 fail!\n");

    DRM_DEBUG_KMS("FDI train done.\n");
}
2666
 
2667
/*
 * Enable the FDI PLLs for a pipe on Ironlake-class hardware.
 *
 * Programs the RX TU size (needed for error detection), enables the PCH FDI
 * RX PLL with the configured lane count and pipe bpc, switches the RX clock
 * from Rawclk to PCDclk, and finally ensures the CPU FDI TX PLL is on.
 * The udelay()s after each step are the documented warm-up times.
 */
static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* Write the TU size bits so error detection works */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~((0x7 << 19) | (0x7 << 16));
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	/* mirror the pipe's bpc setting into the FDI RX control */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
2707
 
2708
static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
2709
{
2710
	struct drm_i915_private *dev_priv = dev->dev_private;
2711
	u32 flags = I915_READ(SOUTH_CHICKEN1);
2712
 
2713
	flags &= ~(FDI_PHASE_SYNC_EN(pipe));
2714
	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
2715
	flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
2716
	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
2717
	POSTING_READ(SOUTH_CHICKEN1);
2718
}
2719
static void ironlake_fdi_disable(struct drm_crtc *crtc)
2720
{
2721
	struct drm_device *dev = crtc->dev;
2722
	struct drm_i915_private *dev_priv = dev->dev_private;
2723
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2724
	int pipe = intel_crtc->pipe;
2725
	u32 reg, temp;
2726
 
2727
	/* disable CPU FDI tx and PCH FDI rx */
2728
	reg = FDI_TX_CTL(pipe);
2729
	temp = I915_READ(reg);
2730
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2731
	POSTING_READ(reg);
2732
 
2733
	reg = FDI_RX_CTL(pipe);
2734
	temp = I915_READ(reg);
2735
	temp &= ~(0x7 << 16);
2736
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2737
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2738
 
2739
	POSTING_READ(reg);
2740
	udelay(100);
2741
 
2742
	/* Ironlake workaround, disable clock pointer after downing FDI */
2743
	if (HAS_PCH_IBX(dev)) {
2744
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2745
		I915_WRITE(FDI_RX_CHICKEN(pipe),
2746
			   I915_READ(FDI_RX_CHICKEN(pipe) &
2747
				     ~FDI_RX_PHASE_SYNC_POINTER_EN));
2748
	} else if (HAS_PCH_CPT(dev)) {
2749
		cpt_phase_pointer_disable(dev, pipe);
2750
	}
2751
 
2752
	/* still set train pattern 1 */
2753
	reg = FDI_TX_CTL(pipe);
2754
	temp = I915_READ(reg);
2755
	temp &= ~FDI_LINK_TRAIN_NONE;
2756
	temp |= FDI_LINK_TRAIN_PATTERN_1;
2757
	I915_WRITE(reg, temp);
2758
 
2759
	reg = FDI_RX_CTL(pipe);
2760
	temp = I915_READ(reg);
2761
	if (HAS_PCH_CPT(dev)) {
2762
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2763
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2764
	} else {
2765
		temp &= ~FDI_LINK_TRAIN_NONE;
2766
		temp |= FDI_LINK_TRAIN_PATTERN_1;
2767
	}
2768
	/* BPC in FDI rx is consistent with that in PIPECONF */
2769
	temp &= ~(0x07 << 16);
2770
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2771
	I915_WRITE(reg, temp);
2772
 
2773
	POSTING_READ(reg);
2774
	udelay(100);
2775
}
2776
 
2777
/*
2778
 * When we disable a pipe, we need to clear any pending scanline wait events
2779
 * to avoid hanging the ring, which we assume we are waiting on.
2780
 */
2781
static void intel_clear_scanline_wait(struct drm_device *dev)
2782
{
2783
	struct drm_i915_private *dev_priv = dev->dev_private;
2784
	struct intel_ring_buffer *ring;
2785
	u32 tmp;
2786
 
2787
	if (IS_GEN2(dev))
2788
		/* Can't break the hang on i8xx */
2789
		return;
2790
 
2791
	ring = LP_RING(dev_priv);
2792
	tmp = I915_READ_CTL(ring);
2793
	if (tmp & RING_WAIT)
2794
		I915_WRITE_CTL(ring, tmp);
2795
}
2796
 
2797
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2798
{
2799
	struct drm_i915_gem_object *obj;
2800
	struct drm_i915_private *dev_priv;
2801
 
2802
	if (crtc->fb == NULL)
2803
		return;
2804
 
2805
	obj = to_intel_framebuffer(crtc->fb)->obj;
2806
	dev_priv = crtc->dev->dev_private;
2360 Serge 2807
	wait_event(dev_priv->pending_flip_queue,
2808
		   atomic_read(&obj->pending_flip) == 0);
2327 Serge 2809
}
2810
 
2811
static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2812
{
2813
	struct drm_device *dev = crtc->dev;
2814
	struct drm_mode_config *mode_config = &dev->mode_config;
2815
	struct intel_encoder *encoder;
2816
 
2817
	/*
2818
	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2819
	 * must be driven by its own crtc; no sharing is possible.
2820
	 */
2821
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
2822
		if (encoder->base.crtc != crtc)
2823
			continue;
2824
 
2825
		switch (encoder->type) {
2826
		case INTEL_OUTPUT_EDP:
2827
			if (!intel_encoder_is_pch_edp(&encoder->base))
2828
				return false;
2829
			continue;
2830
		}
2831
	}
2832
 
2833
	return true;
2834
}
2835
 
2836
/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, transc_sel;

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	intel_enable_pch_pll(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		/* On CougarPoint, transcoder C can be fed by either PLL;
		 * pick whichever this crtc was assigned. */
		transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL :
			TRANSC_DPLLB_SEL;

		/* Be sure PCH DPLL SEL is set */
		temp = I915_READ(PCH_DPLL_SEL);
		if (pipe == 0) {
			temp &= ~(TRANSA_DPLLB_SEL);
			temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
		} else if (pipe == 1) {
			temp &= ~(TRANSB_DPLLB_SEL);
			temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
		} else if (pipe == 2) {
			temp &= ~(TRANSC_DPLLB_SEL);
			temp |= (TRANSC_DPLL_ENABLE | transc_sel);
		}
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	/* Copy the CPU pipe timings into the matching PCH transcoder. */
	I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
	I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
	I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));

	I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
	I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));

	/* Switch FDI from training patterns to normal pixel traffic. */
	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) &&
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
		/* Mirror the pipe's bpc setting into the transcoder. */
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= (TRANS_DP_OUTPUT_ENABLE |
			 TRANS_DP_ENH_FRAMING);
		temp |= bpc << 9; /* same format but at 11:9 */

		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* Route the transcoder to the DP port driving this crtc. */
		switch (intel_trans_dp_port_sel(crtc)) {
		case PCH_DP_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		}

		I915_WRITE(reg, temp);
	}

	intel_enable_transcoder(dev_priv, pipe);
}
2928
 
2342 Serge 2929
/* Verify that the pipe's scanline counter is advancing after a mode set on
 * CougarPoint.  If it is stuck, pulse the FDI auto-train gen-stall-disable
 * chicken bit and re-check; report an error if the pipe is still stuck.
 */
void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		/* Without this, mode sets may fail silently on FDI */
		I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
		udelay(250);
		I915_WRITE(tc2reg, 0);
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
	}
}
2946
 
2327 Serge 2947
/* Full power-up sequence for an Ironlake-class crtc: FDI PLL, panel fitter,
 * LUT, pipe, plane, and (when driving a PCH port) the PCH side.
 * Idempotent: returns immediately if the crtc is already active.
 */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;
    u32 temp;
    bool is_pch_port;

    if (intel_crtc->active)
        return;

    intel_crtc->active = true;
    intel_update_watermarks(dev);

    /* Make sure the LVDS port is powered before touching the pipe. */
    if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
        temp = I915_READ(PCH_LVDS);
        if ((temp & LVDS_PORT_EN) == 0)
            I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
    }

    is_pch_port = intel_crtc_driving_pch(crtc);

    if (is_pch_port)
        ironlake_fdi_pll_enable(crtc);
    else
        ironlake_fdi_disable(crtc);

    /* Enable panel fitting for LVDS */
    if (dev_priv->pch_pf_size &&
        (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
        /* Force use of hard-coded filter coefficients
         * as some pre-programmed values are broken,
         * e.g. x201.
         */
        I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
        I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
        I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
    }

    /*
     * On ILK+ LUT must be loaded before the pipe is running but with
     * clocks enabled
     */
    intel_crtc_load_lut(crtc);

    intel_enable_pipe(dev_priv, pipe, is_pch_port);
    intel_enable_plane(dev_priv, plane, pipe);

    if (is_pch_port)
        ironlake_pch_enable(crtc);

    mutex_lock(&dev->struct_mutex);
    intel_update_fbc(dev);
    mutex_unlock(&dev->struct_mutex);

    /* Cursor support is disabled in this port. */
//    intel_crtc_update_cursor(crtc, true);
}
3006
 
3007
/* Full power-down sequence for an Ironlake-class crtc, mirroring
 * ironlake_crtc_enable in reverse: plane, pipe, panel fitter, FDI,
 * PCH ports, transcoder, PCH DPLL, and finally the FDI PLLs.
 * Idempotent: returns immediately if the crtc is already inactive.
 */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;
    u32 reg, temp;

    if (!intel_crtc->active)
        return;

    ENTER();

    /* Let queued page flips drain before tearing the pipe down. */
    intel_crtc_wait_for_pending_flips(crtc);
//    drm_vblank_off(dev, pipe);
//    intel_crtc_update_cursor(crtc, false);

    intel_disable_plane(dev_priv, plane, pipe);

    if (dev_priv->cfb_plane == plane)
        intel_disable_fbc(dev);

    intel_disable_pipe(dev_priv, pipe);

    /* Disable PF */
    I915_WRITE(PF_CTL(pipe), 0);
    I915_WRITE(PF_WIN_SZ(pipe), 0);

    ironlake_fdi_disable(crtc);

    /* This is a horrible layering violation; we should be doing this in
     * the connector/encoder ->prepare instead, but we don't always have
     * enough information there about the config to know whether it will
     * actually be necessary or just cause undesired flicker.
     */
    intel_disable_pch_ports(dev_priv, pipe);

    intel_disable_transcoder(dev_priv, pipe);

    if (HAS_PCH_CPT(dev)) {
        /* disable TRANS_DP_CTL */
        reg = TRANS_DP_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
        temp |= TRANS_DP_PORT_SEL_NONE;
        I915_WRITE(reg, temp);

        /* disable DPLL_SEL */
        temp = I915_READ(PCH_DPLL_SEL);
        switch (pipe) {
        case 0:
            temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
            break;
        case 1:
            temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
            break;
        case 2:
            /* C shares PLL A or B */
            temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
            break;
        default:
            BUG(); /* wtf */
        }
        I915_WRITE(PCH_DPLL_SEL, temp);
    }

    /* disable PCH DPLL */
	if (!intel_crtc->no_pll)
    	intel_disable_pch_pll(dev_priv, pipe);

    /* Switch from PCDclk to Rawclk */
    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    I915_WRITE(reg, temp & ~FDI_PCDCLK);

    /* Disable CPU FDI TX PLL */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

    POSTING_READ(reg);
    udelay(100);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

    /* Wait for the clocks to turn off. */
    POSTING_READ(reg);
    udelay(100);

    intel_crtc->active = false;
    intel_update_watermarks(dev);

    mutex_lock(&dev->struct_mutex);
    intel_update_fbc(dev);
    intel_clear_scanline_wait(dev);
    mutex_unlock(&dev->struct_mutex);

    LEAVE();

}
3110
 
3111
static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
3112
{
3113
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3114
    int pipe = intel_crtc->pipe;
3115
    int plane = intel_crtc->plane;
3116
 
3117
    /* XXX: When our outputs are all unaware of DPMS modes other than off
3118
     * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3119
     */
3120
    switch (mode) {
3121
    case DRM_MODE_DPMS_ON:
3122
    case DRM_MODE_DPMS_STANDBY:
3123
    case DRM_MODE_DPMS_SUSPEND:
3124
        DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
3125
        ironlake_crtc_enable(crtc);
3126
        break;
3127
 
3128
    case DRM_MODE_DPMS_OFF:
3129
        DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
3130
        ironlake_crtc_disable(crtc);
3131
        break;
3132
    }
3133
}
3134
 
3135
/* Turn the video overlay off when its pipe goes down.  The actual switch-off
 * call is stubbed out in this port; only the interruptible-mode bracketing
 * remains.
 */
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
	if (!enable && intel_crtc->overlay) {
		struct drm_device *dev = intel_crtc->base.dev;
		struct drm_i915_private *dev_priv = dev->dev_private;

		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.interruptible = false;
//       (void) intel_overlay_switch_off(intel_crtc->overlay);
		dev_priv->mm.interruptible = true;
		mutex_unlock(&dev->struct_mutex);
	}

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}
3152
 
3153
/* Power-up sequence for pre-Ironlake (i9xx) crtcs: PLL, pipe, plane, LUT,
 * FBC and overlay.  Idempotent if the crtc is already active.
 */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;

    if (intel_crtc->active)
        return;

    intel_crtc->active = true;
    intel_update_watermarks(dev);

    intel_enable_pll(dev_priv, pipe);
    intel_enable_pipe(dev_priv, pipe, false);
    intel_enable_plane(dev_priv, plane, pipe);

    intel_crtc_load_lut(crtc);
    intel_update_fbc(dev);

    /* Give the overlay scaler a chance to enable if it's on this pipe */
    intel_crtc_dpms_overlay(intel_crtc, true);
//    intel_crtc_update_cursor(crtc, true);
}
3178
 
3179
/* Power-down sequence for pre-Ironlake (i9xx) crtcs, reverse of
 * i9xx_crtc_enable: flips drained, overlay off, FBC off, then plane,
 * pipe and PLL.  Idempotent if the crtc is already inactive.
 */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;

    if (!intel_crtc->active)
        return;

    /* Give the overlay scaler a chance to disable if it's on this pipe */
    intel_crtc_wait_for_pending_flips(crtc);
//    drm_vblank_off(dev, pipe);
    intel_crtc_dpms_overlay(intel_crtc, false);
//    intel_crtc_update_cursor(crtc, false);

    if (dev_priv->cfb_plane == plane)
        intel_disable_fbc(dev);

    intel_disable_plane(dev_priv, plane, pipe);
    intel_disable_pipe(dev_priv, pipe);
    intel_disable_pll(dev_priv, pipe);

    intel_crtc->active = false;
    intel_update_fbc(dev);
    intel_update_watermarks(dev);
    intel_clear_scanline_wait(dev);
}
3208
 
3209
static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
3210
{
3211
    /* XXX: When our outputs are all unaware of DPMS modes other than off
3212
     * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3213
     */
3214
    switch (mode) {
3215
    case DRM_MODE_DPMS_ON:
3216
    case DRM_MODE_DPMS_STANDBY:
3217
    case DRM_MODE_DPMS_SUSPEND:
3218
        i9xx_crtc_enable(crtc);
3219
        break;
3220
    case DRM_MODE_DPMS_OFF:
3221
        i9xx_crtc_disable(crtc);
3222
        break;
3223
    }
3224
}
3225
 
2330 Serge 3226
/**
 * Sets the power management mode of the pipe and plane.
 *
 * Dispatches to the per-generation dpms implementation via
 * dev_priv->display.dpms.  The legacy SAREA bookkeeping below is compiled
 * out in this port (#if 0).
 */
static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool enabled;

	/* Skip redundant transitions. */
	if (intel_crtc->dpms_mode == mode)
		return;

	intel_crtc->dpms_mode = mode;

	dev_priv->display.dpms(crtc, mode);

#if 0
	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (!master_priv->sarea_priv)
		return;

	enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;

	switch (pipe) {
	case 0:
		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
		master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
		break;
	case 1:
		master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
		master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
		break;
	default:
		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
		break;
	}
#endif

}
2327 Serge 3271
 
2330 Serge 3272
static void intel_crtc_disable(struct drm_crtc *crtc)
3273
{
3274
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
3275
	struct drm_device *dev = crtc->dev;
2327 Serge 3276
 
2330 Serge 3277
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
2327 Serge 3278
 
2330 Serge 3279
	if (crtc->fb) {
3280
		mutex_lock(&dev->struct_mutex);
2344 Serge 3281
		i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
2330 Serge 3282
		mutex_unlock(&dev->struct_mutex);
3283
	}
3284
}
2327 Serge 3285
 
2330 Serge 3286
/* Prepare for a mode set.
 *
 * Note we could be a lot smarter here.  We need to figure out which outputs
 * will be enabled, which disabled (in short, how the config will changes)
 * and perform the minimum necessary steps to accomplish that, e.g. updating
 * watermarks, FBC configuration, making sure PLLs are programmed correctly,
 * panel fitting is in the proper state, etc.
 */
/* drm_crtc_helper ->prepare hook: simply turn the pipe off first. */
static void i9xx_crtc_prepare(struct drm_crtc *crtc)
{
	i9xx_crtc_disable(crtc);
}
2327 Serge 3298
 
2330 Serge 3299
/* drm_crtc_helper ->commit hook: re-enable the pipe after the mode set. */
static void i9xx_crtc_commit(struct drm_crtc *crtc)
{
	i9xx_crtc_enable(crtc);
}
2327 Serge 3303
 
2330 Serge 3304
/* drm_crtc_helper ->prepare hook (ILK+): turn the pipe off before the
 * mode set. */
static void ironlake_crtc_prepare(struct drm_crtc *crtc)
{
	ironlake_crtc_disable(crtc);
}
2327 Serge 3308
 
2330 Serge 3309
/* drm_crtc_helper ->commit hook (ILK+): re-enable the pipe after the
 * mode set. */
static void ironlake_crtc_commit(struct drm_crtc *crtc)
{
	ironlake_crtc_enable(crtc);
}
2327 Serge 3313
 
2342 Serge 3314
/* Generic encoder ->prepare: switch the encoder off before a mode set. */
void intel_encoder_prepare(struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	/* lvds has its own version of prepare see intel_lvds_prepare */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
2327 Serge 3320
 
2342 Serge 3321
/* Generic encoder ->commit: switch the encoder back on after a mode set
 * and, on CougarPoint, verify the pipe actually started scanning out.
 */
void intel_encoder_commit(struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	struct drm_device *dev = encoder->dev;
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
	struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);

	/* lvds has its own version of commit see intel_lvds_commit */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);

	if (HAS_PCH_CPT(dev))
		intel_cpt_verify_modeset(dev, intel_crtc->pipe);
}
2327 Serge 3334
 
2330 Serge 3335
/* Common destructor for intel encoders: detach from the DRM core and free
 * the containing intel_encoder (which embeds the drm_encoder).
 */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
3342
 
3343
static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3344
				  struct drm_display_mode *mode,
3345
				  struct drm_display_mode *adjusted_mode)
3346
{
3347
	struct drm_device *dev = crtc->dev;
3348
 
3349
	if (HAS_PCH_SPLIT(dev)) {
3350
		/* FDI link clock is fixed at 2.7G */
3351
		if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
3352
			return false;
3353
	}
3354
 
3355
	/* XXX some encoders set the crtcinfo, others don't.
3356
	 * Obviously we need some form of conflict resolution here...
3357
	 */
3358
	if (adjusted_mode->crtc_htotal == 0)
3359
		drm_mode_set_crtcinfo(adjusted_mode, 0);
3360
 
3361
	return true;
3362
}
3363
 
2327 Serge 3364
/* i945-class hardware runs the display core clock at a fixed 400 MHz. */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000; /* kHz */
}
3368
 
3369
/* i915-class hardware runs the display core clock at a fixed 333 MHz. */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333000; /* kHz */
}
3373
 
3374
/* Remaining i9xx variants run the display core clock at a fixed 200 MHz. */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000; /* kHz */
}
3378
 
3379
/* Read the display core clock for i915GM from the GCFGC PCI config word:
 * 133 MHz in the low-frequency state, otherwise 333 or 190 MHz depending
 * on the display-clock field.
 */
static int i915gm_get_display_clock_speed(struct drm_device *dev)
{
	u16 gcfgc = 0;

	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
		return 133000;
	else {
		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
		case GC_DISPLAY_CLOCK_333_MHZ:
			return 333000;
		default:
		/* Unknown encodings fall back to the 190/200 MHz value. */
		case GC_DISPLAY_CLOCK_190_200_MHZ:
			return 190000;
		}
	}
}
3397
 
3398
/* i865 runs the display core clock at a fixed 266 MHz. */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266000; /* kHz */
}
3402
 
3403
/* Derive the i855 display core clock from the HPLLCC clock-control field.
 *
 * NOTE(review): hpllcc is initialized to 0 and never read back from the
 * hardware, so the switch always evaluates (0 & GC_CLOCK_CONTROL_MASK) —
 * presumably a config-register read was dropped in this port; the
 * "assume high speed state" comment suggests this is intentional, but
 * TODO confirm against the upstream driver.
 */
static int i855_get_display_clock_speed(struct drm_device *dev)
{
	u16 hpllcc = 0;
	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133000;
	}

	/* Shouldn't happen */
	return 0;
}
3422
 
3423
/* i830 runs the display core clock at a fixed 133 MHz. */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133000; /* kHz */
}
3427
 
3428
/* FDI link M/N ratio parameters, computed by ironlake_compute_m_n(). */
struct fdi_m_n {
    u32        tu;       /* transfer unit size (defaulted to 64) */
    u32        gmch_m;   /* data M: bits-per-pixel * pixel clock */
    u32        gmch_n;   /* data N: link clock * lanes * 8 */
    u32        link_m;   /* link M: pixel clock */
    u32        link_n;   /* link N: link clock */
};
3435
 
3436
static void
3437
fdi_reduce_ratio(u32 *num, u32 *den)
3438
{
3439
	while (*num > 0xffffff || *den > 0xffffff) {
3440
		*num >>= 1;
3441
		*den >>= 1;
3442
	}
3443
}
3444
 
3445
/* Compute the FDI data and link M/N ratios for the given pixel format,
 * lane count, pixel clock and link clock, reducing each ratio to fit the
 * 24-bit hardware fields.
 */
static void
ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
		     int link_clock, struct fdi_m_n *m_n)
{
	m_n->tu = 64; /* default size */

	/* BUG_ON(pixel_clock > INT_MAX / 36); */
	/* data M/N: payload rate vs raw link bandwidth (lanes * 8 bit). */
	m_n->gmch_m = bits_per_pixel * pixel_clock;
	m_n->gmch_n = link_clock * nlanes * 8;
	fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);

	/* link M/N: pixel clock vs link clock. */
	m_n->link_m = pixel_clock;
	m_n->link_n = link_clock;
	fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
3460
 
3461
 
3462
/* Per-platform FIFO/watermark tuning values, consumed by
 * intel_calculate_wm(). */
struct intel_watermark_params {
    unsigned long fifo_size;       /* total FIFO size available */
    unsigned long max_wm;          /* upper clamp for the computed watermark */
    unsigned long default_wm;      /* fallback when the computation underflows */
    unsigned long guard_size;      /* extra entries reserved as a safety margin */
    unsigned long cacheline_size;  /* FIFO line size, for entry rounding */
};
3469
 
3470
/* Pineview has different values for various configs */
3471
static const struct intel_watermark_params pineview_display_wm = {
3472
    PINEVIEW_DISPLAY_FIFO,
3473
    PINEVIEW_MAX_WM,
3474
    PINEVIEW_DFT_WM,
3475
    PINEVIEW_GUARD_WM,
3476
    PINEVIEW_FIFO_LINE_SIZE
3477
};
3478
static const struct intel_watermark_params pineview_display_hplloff_wm = {
3479
    PINEVIEW_DISPLAY_FIFO,
3480
    PINEVIEW_MAX_WM,
3481
    PINEVIEW_DFT_HPLLOFF_WM,
3482
    PINEVIEW_GUARD_WM,
3483
    PINEVIEW_FIFO_LINE_SIZE
3484
};
3485
static const struct intel_watermark_params pineview_cursor_wm = {
3486
    PINEVIEW_CURSOR_FIFO,
3487
    PINEVIEW_CURSOR_MAX_WM,
3488
    PINEVIEW_CURSOR_DFT_WM,
3489
    PINEVIEW_CURSOR_GUARD_WM,
3490
    PINEVIEW_FIFO_LINE_SIZE,
3491
};
3492
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
3493
    PINEVIEW_CURSOR_FIFO,
3494
    PINEVIEW_CURSOR_MAX_WM,
3495
    PINEVIEW_CURSOR_DFT_WM,
3496
    PINEVIEW_CURSOR_GUARD_WM,
3497
    PINEVIEW_FIFO_LINE_SIZE
3498
};
3499
static const struct intel_watermark_params g4x_wm_info = {
3500
    G4X_FIFO_SIZE,
3501
    G4X_MAX_WM,
3502
    G4X_MAX_WM,
3503
    2,
3504
    G4X_FIFO_LINE_SIZE,
3505
};
3506
static const struct intel_watermark_params g4x_cursor_wm_info = {
3507
    I965_CURSOR_FIFO,
3508
    I965_CURSOR_MAX_WM,
3509
    I965_CURSOR_DFT_WM,
3510
    2,
3511
    G4X_FIFO_LINE_SIZE,
3512
};
3513
static const struct intel_watermark_params i965_cursor_wm_info = {
3514
    I965_CURSOR_FIFO,
3515
    I965_CURSOR_MAX_WM,
3516
    I965_CURSOR_DFT_WM,
3517
    2,
3518
    I915_FIFO_LINE_SIZE,
3519
};
3520
static const struct intel_watermark_params i945_wm_info = {
3521
    I945_FIFO_SIZE,
3522
    I915_MAX_WM,
3523
    1,
3524
    2,
3525
    I915_FIFO_LINE_SIZE
3526
};
3527
static const struct intel_watermark_params i915_wm_info = {
3528
    I915_FIFO_SIZE,
3529
    I915_MAX_WM,
3530
    1,
3531
    2,
3532
    I915_FIFO_LINE_SIZE
3533
};
3534
static const struct intel_watermark_params i855_wm_info = {
3535
    I855GM_FIFO_SIZE,
3536
    I915_MAX_WM,
3537
    1,
3538
    2,
3539
    I830_FIFO_LINE_SIZE
3540
};
3541
static const struct intel_watermark_params i830_wm_info = {
3542
    I830_FIFO_SIZE,
3543
    I915_MAX_WM,
3544
    1,
3545
    2,
3546
    I830_FIFO_LINE_SIZE
3547
};
3548
 
3549
static const struct intel_watermark_params ironlake_display_wm_info = {
3550
    ILK_DISPLAY_FIFO,
3551
    ILK_DISPLAY_MAXWM,
3552
    ILK_DISPLAY_DFTWM,
3553
    2,
3554
    ILK_FIFO_LINE_SIZE
3555
};
3556
static const struct intel_watermark_params ironlake_cursor_wm_info = {
3557
    ILK_CURSOR_FIFO,
3558
    ILK_CURSOR_MAXWM,
3559
    ILK_CURSOR_DFTWM,
3560
    2,
3561
    ILK_FIFO_LINE_SIZE
3562
};
3563
static const struct intel_watermark_params ironlake_display_srwm_info = {
3564
    ILK_DISPLAY_SR_FIFO,
3565
    ILK_DISPLAY_MAX_SRWM,
3566
    ILK_DISPLAY_DFT_SRWM,
3567
    2,
3568
    ILK_FIFO_LINE_SIZE
3569
};
3570
static const struct intel_watermark_params ironlake_cursor_srwm_info = {
3571
    ILK_CURSOR_SR_FIFO,
3572
    ILK_CURSOR_MAX_SRWM,
3573
    ILK_CURSOR_DFT_SRWM,
3574
    2,
3575
    ILK_FIFO_LINE_SIZE
3576
};
3577
 
3578
static const struct intel_watermark_params sandybridge_display_wm_info = {
3579
    SNB_DISPLAY_FIFO,
3580
    SNB_DISPLAY_MAXWM,
3581
    SNB_DISPLAY_DFTWM,
3582
    2,
3583
    SNB_FIFO_LINE_SIZE
3584
};
3585
static const struct intel_watermark_params sandybridge_cursor_wm_info = {
3586
    SNB_CURSOR_FIFO,
3587
    SNB_CURSOR_MAXWM,
3588
    SNB_CURSOR_DFTWM,
3589
    2,
3590
    SNB_FIFO_LINE_SIZE
3591
};
3592
static const struct intel_watermark_params sandybridge_display_srwm_info = {
3593
    SNB_DISPLAY_SR_FIFO,
3594
    SNB_DISPLAY_MAX_SRWM,
3595
    SNB_DISPLAY_DFT_SRWM,
3596
    2,
3597
    SNB_FIFO_LINE_SIZE
3598
};
3599
static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
3600
    SNB_CURSOR_SR_FIFO,
3601
    SNB_CURSOR_MAX_SRWM,
3602
    SNB_CURSOR_DFT_SRWM,
3603
    2,
3604
    SNB_FIFO_LINE_SIZE
3605
};
3606
 
3607
 
3608
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO allocated to this plane, in entries
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again).  Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size.  When it reaches the watermark level, it'll start
 * fetching FIFO line sized based chunks from memory until the FIFO fills
 * past the watermark point.  If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
                    const struct intel_watermark_params *wm,
                    int fifo_size,
                    int pixel_size,
                    unsigned long latency_ns)
{
    long entries_required, wm_size;

    /*
     * Note: we need to make sure we don't overflow for various clock &
     * latency values.
     * clocks go from a few thousand to several hundred thousand.
     * latency is usually a few thousand
     */
    /* Bytes drained from the FIFO while one memory fetch is in flight,
     * rounded up to whole FIFO lines. */
    entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
        1000;
    entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

    DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

    wm_size = fifo_size - (entries_required + wm->guard_size);

    DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

    /* Don't promote wm_size to unsigned... */
    if (wm_size > (long)wm->max_wm)
        wm_size = wm->max_wm;
    if (wm_size <= 0)
        wm_size = wm->default_wm;
    return wm_size;
}
3657
 
3658
/* One row of the CxSR (self-refresh) latency table, keyed on platform
 * variant and memory configuration. */
struct cxsr_latency {
    int is_desktop;                      /* 1 = desktop, 0 = mobile variant */
    int is_ddr3;                         /* 1 = DDR3, 0 = DDR2 */
    unsigned long fsb_freq;              /* front-side bus frequency, MHz */
    unsigned long mem_freq;              /* memory frequency, MHz */
    unsigned long display_sr;            /* display self-refresh latency */
    unsigned long display_hpll_disable;  /* display latency with HPLL off */
    unsigned long cursor_sr;             /* cursor self-refresh latency */
    unsigned long cursor_hpll_disable;   /* cursor latency with HPLL off */
};
3668
 
3669
/* CxSR latency values per {desktop/mobile, DDR2/DDR3, FSB, memory} combo;
 * looked up by intel_get_cxsr_latency(). */
static const struct cxsr_latency cxsr_latency_table[] = {
    {1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
    {1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
    {1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
    {1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
    {1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

    {1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
    {1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
    {1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
    {1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
    {1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

    {1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
    {1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
    {1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
    {1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
    {1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

    {0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
    {0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
    {0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
    {0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
    {0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

    {0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
    {0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
    {0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
    {0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
    {0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

    {0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
    {0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
    {0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
    {0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
    {0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
3706
 
3707
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
3708
                             int is_ddr3,
3709
                             int fsb,
3710
                             int mem)
3711
{
3712
    const struct cxsr_latency *latency;
3713
    int i;
3714
 
3715
    if (fsb == 0 || mem == 0)
3716
        return NULL;
3717
 
3718
    for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
3719
        latency = &cxsr_latency_table[i];
3720
        if (is_desktop == latency->is_desktop &&
3721
            is_ddr3 == latency->is_ddr3 &&
3722
            fsb == latency->fsb_freq && mem == latency->mem_freq)
3723
            return latency;
3724
    }
3725
 
3726
    DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
3727
 
3728
    return NULL;
3729
}
3730
 
3731
/* Turn off Pineview self-refresh by clearing the enable bit in DSPFW3. */
static void pineview_disable_cxsr(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    /* deactivate cxsr */
    I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}
3738
 
3739
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000;
3754
 
3755
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
3756
{
3757
	struct drm_i915_private *dev_priv = dev->dev_private;
3758
	uint32_t dsparb = I915_READ(DSPARB);
3759
	int size;
3760
 
3761
	size = dsparb & 0x7f;
3762
	if (plane)
3763
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
3764
 
3765
	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3766
		      plane ? "B" : "A", size);
3767
 
3768
	return size;
3769
}
3770
 
3771
static int i85x_get_fifo_size(struct drm_device *dev, int plane)
3772
{
3773
	struct drm_i915_private *dev_priv = dev->dev_private;
3774
	uint32_t dsparb = I915_READ(DSPARB);
3775
	int size;
3776
 
3777
	size = dsparb & 0x1ff;
3778
	if (plane)
3779
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
3780
	size >>= 1; /* Convert to cachelines */
3781
 
3782
	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3783
		      plane ? "B" : "A", size);
3784
 
3785
	return size;
3786
}
3787
 
3788
static int i845_get_fifo_size(struct drm_device *dev, int plane)
3789
{
3790
	struct drm_i915_private *dev_priv = dev->dev_private;
3791
	uint32_t dsparb = I915_READ(DSPARB);
3792
	int size;
3793
 
3794
	size = dsparb & 0x7f;
3795
	size >>= 2; /* Convert to cachelines */
3796
 
3797
	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3798
		      plane ? "B" : "A",
3799
		      size);
3800
 
3801
	return size;
3802
}
3803
 
3804
static int i830_get_fifo_size(struct drm_device *dev, int plane)
3805
{
3806
	struct drm_i915_private *dev_priv = dev->dev_private;
3807
	uint32_t dsparb = I915_READ(DSPARB);
3808
	int size;
3809
 
3810
	size = dsparb & 0x7f;
3811
	size >>= 1; /* Convert to cachelines */
3812
 
3813
	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3814
		      plane ? "B" : "A", size);
3815
 
3816
	return size;
3817
}
3818
 
3819
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
3820
{
3821
    struct drm_crtc *crtc, *enabled = NULL;
3822
 
3823
    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3824
        if (crtc->enabled && crtc->fb) {
3825
            if (enabled)
3826
                return NULL;
3827
            enabled = crtc;
3828
        }
3829
    }
3830
 
3831
    return enabled;
3832
}
3833
 
3834
static void pineview_update_wm(struct drm_device *dev)
3835
{
3836
	struct drm_i915_private *dev_priv = dev->dev_private;
3837
	struct drm_crtc *crtc;
3838
	const struct cxsr_latency *latency;
3839
	u32 reg;
3840
	unsigned long wm;
3841
 
3842
	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
3843
					 dev_priv->fsb_freq, dev_priv->mem_freq);
3844
	if (!latency) {
3845
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
3846
		pineview_disable_cxsr(dev);
3847
		return;
3848
	}
3849
 
3850
	crtc = single_enabled_crtc(dev);
3851
	if (crtc) {
3852
		int clock = crtc->mode.clock;
3853
		int pixel_size = crtc->fb->bits_per_pixel / 8;
3854
 
3855
		/* Display SR */
3856
		wm = intel_calculate_wm(clock, &pineview_display_wm,
3857
					pineview_display_wm.fifo_size,
3858
					pixel_size, latency->display_sr);
3859
		reg = I915_READ(DSPFW1);
3860
		reg &= ~DSPFW_SR_MASK;
3861
		reg |= wm << DSPFW_SR_SHIFT;
3862
		I915_WRITE(DSPFW1, reg);
3863
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
3864
 
3865
		/* cursor SR */
3866
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
3867
					pineview_display_wm.fifo_size,
3868
					pixel_size, latency->cursor_sr);
3869
		reg = I915_READ(DSPFW3);
3870
		reg &= ~DSPFW_CURSOR_SR_MASK;
3871
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
3872
		I915_WRITE(DSPFW3, reg);
3873
 
3874
		/* Display HPLL off SR */
3875
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
3876
					pineview_display_hplloff_wm.fifo_size,
3877
					pixel_size, latency->display_hpll_disable);
3878
		reg = I915_READ(DSPFW3);
3879
		reg &= ~DSPFW_HPLL_SR_MASK;
3880
		reg |= wm & DSPFW_HPLL_SR_MASK;
3881
		I915_WRITE(DSPFW3, reg);
3882
 
3883
		/* cursor HPLL off SR */
3884
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
3885
					pineview_display_hplloff_wm.fifo_size,
3886
					pixel_size, latency->cursor_hpll_disable);
3887
		reg = I915_READ(DSPFW3);
3888
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
3889
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
3890
		I915_WRITE(DSPFW3, reg);
3891
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
3892
 
3893
		/* activate cxsr */
3894
		I915_WRITE(DSPFW3,
3895
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
3896
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
3897
	} else {
3898
		pineview_disable_cxsr(dev);
3899
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
3900
	}
3901
}
3902
 
3903
static bool g4x_compute_wm0(struct drm_device *dev,
3904
                int plane,
3905
                const struct intel_watermark_params *display,
3906
                int display_latency_ns,
3907
                const struct intel_watermark_params *cursor,
3908
                int cursor_latency_ns,
3909
                int *plane_wm,
3910
                int *cursor_wm)
3911
{
3912
    struct drm_crtc *crtc;
3913
    int htotal, hdisplay, clock, pixel_size;
3914
    int line_time_us, line_count;
3915
    int entries, tlb_miss;
3916
 
3917
    crtc = intel_get_crtc_for_plane(dev, plane);
3918
    if (crtc->fb == NULL || !crtc->enabled) {
3919
        *cursor_wm = cursor->guard_size;
3920
        *plane_wm = display->guard_size;
3921
        return false;
3922
    }
3923
 
3924
    htotal = crtc->mode.htotal;
3925
    hdisplay = crtc->mode.hdisplay;
3926
    clock = crtc->mode.clock;
3927
    pixel_size = crtc->fb->bits_per_pixel / 8;
3928
 
3929
    /* Use the small buffer method to calculate plane watermark */
3930
    entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
3931
    tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
3932
    if (tlb_miss > 0)
3933
        entries += tlb_miss;
3934
    entries = DIV_ROUND_UP(entries, display->cacheline_size);
3935
    *plane_wm = entries + display->guard_size;
3936
    if (*plane_wm > (int)display->max_wm)
3937
        *plane_wm = display->max_wm;
3938
 
3939
    /* Use the large buffer method to calculate cursor watermark */
3940
    line_time_us = ((htotal * 1000) / clock);
3941
    line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
3942
    entries = line_count * 64 * pixel_size;
3943
    tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
3944
    if (tlb_miss > 0)
3945
        entries += tlb_miss;
3946
    entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
3947
    *cursor_wm = entries + cursor->guard_size;
3948
    if (*cursor_wm > (int)cursor->max_wm)
3949
        *cursor_wm = (int)cursor->max_wm;
3950
 
3951
    return true;
3952
}
3953
 
3954
/*
3955
 * Check the wm result.
3956
 *
3957
 * If any calculated watermark values is larger than the maximum value that
3958
 * can be programmed into the associated watermark register, that watermark
3959
 * must be disabled.
3960
 */
3961
static bool g4x_check_srwm(struct drm_device *dev,
3962
			   int display_wm, int cursor_wm,
3963
			   const struct intel_watermark_params *display,
3964
			   const struct intel_watermark_params *cursor)
3965
{
3966
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
3967
		      display_wm, cursor_wm);
3968
 
3969
	if (display_wm > display->max_wm) {
3970
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
3971
			      display_wm, display->max_wm);
3972
		return false;
3973
	}
3974
 
3975
	if (cursor_wm > cursor->max_wm) {
3976
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
3977
			      cursor_wm, cursor->max_wm);
3978
		return false;
3979
	}
3980
 
3981
	if (!(display_wm || cursor_wm)) {
3982
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
3983
		return false;
3984
	}
3985
 
3986
	return true;
3987
}
3988
 
3989
static bool g4x_compute_srwm(struct drm_device *dev,
3990
			     int plane,
3991
			     int latency_ns,
3992
			     const struct intel_watermark_params *display,
3993
			     const struct intel_watermark_params *cursor,
3994
			     int *display_wm, int *cursor_wm)
3995
{
3996
	struct drm_crtc *crtc;
3997
	int hdisplay, htotal, pixel_size, clock;
3998
	unsigned long line_time_us;
3999
	int line_count, line_size;
4000
	int small, large;
4001
	int entries;
4002
 
4003
	if (!latency_ns) {
4004
		*display_wm = *cursor_wm = 0;
4005
		return false;
4006
	}
4007
 
4008
	crtc = intel_get_crtc_for_plane(dev, plane);
4009
	hdisplay = crtc->mode.hdisplay;
4010
	htotal = crtc->mode.htotal;
4011
	clock = crtc->mode.clock;
4012
	pixel_size = crtc->fb->bits_per_pixel / 8;
4013
 
4014
	line_time_us = (htotal * 1000) / clock;
4015
	line_count = (latency_ns / line_time_us + 1000) / 1000;
4016
	line_size = hdisplay * pixel_size;
4017
 
4018
	/* Use the minimum of the small and large buffer method for primary */
4019
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4020
	large = line_count * line_size;
4021
 
4022
	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
4023
	*display_wm = entries + display->guard_size;
4024
 
4025
	/* calculate the self-refresh watermark for display cursor */
4026
	entries = line_count * pixel_size * 64;
4027
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4028
	*cursor_wm = entries + cursor->guard_size;
4029
 
4030
	return g4x_check_srwm(dev,
4031
			      *display_wm, *cursor_wm,
4032
			      display, cursor);
4033
}
4034
 
4035
#define single_plane_enabled(mask) is_power_of_2(mask)
4036
 
4037
static void g4x_update_wm(struct drm_device *dev)
4038
{
4039
	static const int sr_latency_ns = 12000;
4040
	struct drm_i915_private *dev_priv = dev->dev_private;
4041
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
4042
	int plane_sr, cursor_sr;
4043
	unsigned int enabled = 0;
4044
 
4045
	if (g4x_compute_wm0(dev, 0,
4046
			    &g4x_wm_info, latency_ns,
4047
			    &g4x_cursor_wm_info, latency_ns,
4048
			    &planea_wm, &cursora_wm))
4049
		enabled |= 1;
4050
 
4051
	if (g4x_compute_wm0(dev, 1,
4052
			    &g4x_wm_info, latency_ns,
4053
			    &g4x_cursor_wm_info, latency_ns,
4054
			    &planeb_wm, &cursorb_wm))
4055
		enabled |= 2;
4056
 
4057
	plane_sr = cursor_sr = 0;
4058
	if (single_plane_enabled(enabled) &&
4059
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
4060
			     sr_latency_ns,
4061
			     &g4x_wm_info,
4062
			     &g4x_cursor_wm_info,
4063
			     &plane_sr, &cursor_sr))
4064
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
4065
	else
4066
		I915_WRITE(FW_BLC_SELF,
4067
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
4068
 
4069
	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
4070
		      planea_wm, cursora_wm,
4071
		      planeb_wm, cursorb_wm,
4072
		      plane_sr, cursor_sr);
4073
 
4074
	I915_WRITE(DSPFW1,
4075
		   (plane_sr << DSPFW_SR_SHIFT) |
4076
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
4077
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
4078
		   planea_wm);
4079
	I915_WRITE(DSPFW2,
4080
		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
4081
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
4082
	/* HPLL off in SR has some issues on G4x... disable it */
4083
	I915_WRITE(DSPFW3,
4084
		   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
4085
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
4086
}
4087
 
4088
static void i965_update_wm(struct drm_device *dev)
4089
{
4090
	struct drm_i915_private *dev_priv = dev->dev_private;
4091
	struct drm_crtc *crtc;
4092
	int srwm = 1;
4093
	int cursor_sr = 16;
4094
 
4095
	/* Calc sr entries for one plane configs */
4096
	crtc = single_enabled_crtc(dev);
4097
	if (crtc) {
4098
		/* self-refresh has much higher latency */
4099
		static const int sr_latency_ns = 12000;
4100
		int clock = crtc->mode.clock;
4101
		int htotal = crtc->mode.htotal;
4102
		int hdisplay = crtc->mode.hdisplay;
4103
		int pixel_size = crtc->fb->bits_per_pixel / 8;
4104
		unsigned long line_time_us;
4105
		int entries;
4106
 
4107
		line_time_us = ((htotal * 1000) / clock);
4108
 
4109
		/* Use ns/us then divide to preserve precision */
4110
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
4111
			pixel_size * hdisplay;
4112
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
4113
		srwm = I965_FIFO_SIZE - entries;
4114
		if (srwm < 0)
4115
			srwm = 1;
4116
		srwm &= 0x1ff;
4117
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
4118
			      entries, srwm);
4119
 
4120
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
4121
			pixel_size * 64;
4122
		entries = DIV_ROUND_UP(entries,
4123
					  i965_cursor_wm_info.cacheline_size);
4124
		cursor_sr = i965_cursor_wm_info.fifo_size -
4125
			(entries + i965_cursor_wm_info.guard_size);
4126
 
4127
		if (cursor_sr > i965_cursor_wm_info.max_wm)
4128
			cursor_sr = i965_cursor_wm_info.max_wm;
4129
 
4130
		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
4131
			      "cursor %d\n", srwm, cursor_sr);
4132
 
4133
		if (IS_CRESTLINE(dev))
4134
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
4135
	} else {
4136
		/* Turn off self refresh if both pipes are enabled */
4137
		if (IS_CRESTLINE(dev))
4138
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
4139
				   & ~FW_BLC_SELF_EN);
4140
	}
4141
 
4142
	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
4143
		      srwm);
4144
 
4145
	/* 965 has limitations... */
4146
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
4147
		   (8 << 16) | (8 << 8) | (8 << 0));
4148
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
4149
	/* update cursor SR watermark */
4150
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
4151
}
4152
 
4153
static void i9xx_update_wm(struct drm_device *dev)
4154
{
4155
	struct drm_i915_private *dev_priv = dev->dev_private;
4156
	const struct intel_watermark_params *wm_info;
4157
	uint32_t fwater_lo;
4158
	uint32_t fwater_hi;
4159
	int cwm, srwm = 1;
4160
	int fifo_size;
4161
	int planea_wm, planeb_wm;
4162
	struct drm_crtc *crtc, *enabled = NULL;
4163
 
4164
	if (IS_I945GM(dev))
4165
		wm_info = &i945_wm_info;
4166
	else if (!IS_GEN2(dev))
4167
		wm_info = &i915_wm_info;
4168
	else
4169
		wm_info = &i855_wm_info;
4170
 
4171
	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
4172
	crtc = intel_get_crtc_for_plane(dev, 0);
4173
	if (crtc->enabled && crtc->fb) {
4174
		planea_wm = intel_calculate_wm(crtc->mode.clock,
4175
					       wm_info, fifo_size,
4176
					       crtc->fb->bits_per_pixel / 8,
4177
					       latency_ns);
4178
		enabled = crtc;
4179
	} else
4180
		planea_wm = fifo_size - wm_info->guard_size;
4181
 
4182
	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
4183
	crtc = intel_get_crtc_for_plane(dev, 1);
4184
	if (crtc->enabled && crtc->fb) {
4185
		planeb_wm = intel_calculate_wm(crtc->mode.clock,
4186
					       wm_info, fifo_size,
4187
					       crtc->fb->bits_per_pixel / 8,
4188
					       latency_ns);
4189
		if (enabled == NULL)
4190
			enabled = crtc;
4191
		else
4192
			enabled = NULL;
4193
	} else
4194
		planeb_wm = fifo_size - wm_info->guard_size;
4195
 
4196
	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
4197
 
4198
	/*
4199
	 * Overlay gets an aggressive default since video jitter is bad.
4200
	 */
4201
	cwm = 2;
4202
 
4203
	/* Play safe and disable self-refresh before adjusting watermarks. */
4204
	if (IS_I945G(dev) || IS_I945GM(dev))
4205
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
4206
	else if (IS_I915GM(dev))
4207
		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
4208
 
4209
	/* Calc sr entries for one plane configs */
4210
	if (HAS_FW_BLC(dev) && enabled) {
4211
		/* self-refresh has much higher latency */
4212
		static const int sr_latency_ns = 6000;
4213
		int clock = enabled->mode.clock;
4214
		int htotal = enabled->mode.htotal;
4215
		int hdisplay = enabled->mode.hdisplay;
4216
		int pixel_size = enabled->fb->bits_per_pixel / 8;
4217
		unsigned long line_time_us;
4218
		int entries;
4219
 
4220
		line_time_us = (htotal * 1000) / clock;
4221
 
4222
		/* Use ns/us then divide to preserve precision */
4223
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
4224
			pixel_size * hdisplay;
4225
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
4226
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
4227
		srwm = wm_info->fifo_size - entries;
4228
		if (srwm < 0)
4229
			srwm = 1;
4230
 
4231
		if (IS_I945G(dev) || IS_I945GM(dev))
4232
			I915_WRITE(FW_BLC_SELF,
4233
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
4234
		else if (IS_I915GM(dev))
4235
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
4236
	}
4237
 
4238
	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
4239
		      planea_wm, planeb_wm, cwm, srwm);
4240
 
4241
	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
4242
	fwater_hi = (cwm & 0x1f);
4243
 
4244
	/* Set request length to 8 cachelines per fetch */
4245
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
4246
	fwater_hi = fwater_hi | (1 << 8);
4247
 
4248
	I915_WRITE(FW_BLC, fwater_lo);
4249
	I915_WRITE(FW_BLC2, fwater_hi);
4250
 
4251
	if (HAS_FW_BLC(dev)) {
4252
		if (enabled) {
4253
			if (IS_I945G(dev) || IS_I945GM(dev))
4254
				I915_WRITE(FW_BLC_SELF,
4255
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
4256
			else if (IS_I915GM(dev))
4257
				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
4258
			DRM_DEBUG_KMS("memory self refresh enabled\n");
4259
		} else
4260
			DRM_DEBUG_KMS("memory self refresh disabled\n");
4261
	}
4262
}
4263
 
4264
static void i830_update_wm(struct drm_device *dev)
4265
{
4266
	struct drm_i915_private *dev_priv = dev->dev_private;
4267
	struct drm_crtc *crtc;
4268
	uint32_t fwater_lo;
4269
	int planea_wm;
4270
 
4271
	crtc = single_enabled_crtc(dev);
4272
	if (crtc == NULL)
4273
		return;
4274
 
4275
	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
4276
				       dev_priv->display.get_fifo_size(dev, 0),
4277
				       crtc->fb->bits_per_pixel / 8,
4278
				       latency_ns);
4279
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
4280
	fwater_lo |= (3<<8) | planea_wm;
4281
 
4282
	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
4283
 
4284
	I915_WRITE(FW_BLC, fwater_lo);
4285
}
4286
 
4287
#define ILK_LP0_PLANE_LATENCY		700
4288
#define ILK_LP0_CURSOR_LATENCY		1300
4289
 
4290
/*
4291
 * Check the wm result.
4292
 *
4293
 * If any calculated watermark values is larger than the maximum value that
4294
 * can be programmed into the associated watermark register, that watermark
4295
 * must be disabled.
4296
 */
4297
static bool ironlake_check_srwm(struct drm_device *dev, int level,
4298
				int fbc_wm, int display_wm, int cursor_wm,
4299
				const struct intel_watermark_params *display,
4300
				const struct intel_watermark_params *cursor)
4301
{
4302
	struct drm_i915_private *dev_priv = dev->dev_private;
4303
 
4304
	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
4305
		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
4306
 
4307
	if (fbc_wm > SNB_FBC_MAX_SRWM) {
4308
		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
4309
			      fbc_wm, SNB_FBC_MAX_SRWM, level);
4310
 
4311
		/* fbc has it's own way to disable FBC WM */
4312
		I915_WRITE(DISP_ARB_CTL,
4313
			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
4314
		return false;
4315
	}
4316
 
4317
	if (display_wm > display->max_wm) {
4318
		DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
4319
			      display_wm, SNB_DISPLAY_MAX_SRWM, level);
4320
		return false;
4321
	}
4322
 
4323
	if (cursor_wm > cursor->max_wm) {
4324
		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
4325
			      cursor_wm, SNB_CURSOR_MAX_SRWM, level);
4326
		return false;
4327
	}
4328
 
4329
	if (!(fbc_wm || display_wm || cursor_wm)) {
4330
		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
4331
		return false;
4332
	}
4333
 
4334
	return true;
4335
}
4336
 
4337
/*
4338
 * Compute watermark values of WM[1-3],
4339
 */
4340
static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
4341
                  int latency_ns,
4342
                  const struct intel_watermark_params *display,
4343
                  const struct intel_watermark_params *cursor,
4344
                  int *fbc_wm, int *display_wm, int *cursor_wm)
4345
{
4346
    struct drm_crtc *crtc;
4347
    unsigned long line_time_us;
4348
    int hdisplay, htotal, pixel_size, clock;
4349
    int line_count, line_size;
4350
    int small, large;
4351
    int entries;
4352
 
4353
    if (!latency_ns) {
4354
        *fbc_wm = *display_wm = *cursor_wm = 0;
4355
        return false;
4356
    }
4357
 
4358
    crtc = intel_get_crtc_for_plane(dev, plane);
4359
    hdisplay = crtc->mode.hdisplay;
4360
    htotal = crtc->mode.htotal;
4361
    clock = crtc->mode.clock;
4362
    pixel_size = crtc->fb->bits_per_pixel / 8;
4363
 
4364
    line_time_us = (htotal * 1000) / clock;
4365
    line_count = (latency_ns / line_time_us + 1000) / 1000;
4366
    line_size = hdisplay * pixel_size;
4367
 
4368
    /* Use the minimum of the small and large buffer method for primary */
4369
    small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4370
    large = line_count * line_size;
4371
 
4372
    entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
4373
    *display_wm = entries + display->guard_size;
4374
 
4375
    /*
4376
     * Spec says:
4377
     * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
4378
     */
4379
    *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
4380
 
4381
    /* calculate the self-refresh watermark for display cursor */
4382
    entries = line_count * pixel_size * 64;
4383
    entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4384
    *cursor_wm = entries + cursor->guard_size;
4385
 
4386
    return ironlake_check_srwm(dev, level,
4387
                   *fbc_wm, *display_wm, *cursor_wm,
4388
                   display, cursor);
4389
}
4390
 
4391
static void ironlake_update_wm(struct drm_device *dev)
4392
{
4393
	struct drm_i915_private *dev_priv = dev->dev_private;
4394
	int fbc_wm, plane_wm, cursor_wm;
4395
	unsigned int enabled;
4396
 
4397
	enabled = 0;
4398
	if (g4x_compute_wm0(dev, 0,
4399
			    &ironlake_display_wm_info,
4400
			    ILK_LP0_PLANE_LATENCY,
4401
			    &ironlake_cursor_wm_info,
4402
			    ILK_LP0_CURSOR_LATENCY,
4403
			    &plane_wm, &cursor_wm)) {
4404
		I915_WRITE(WM0_PIPEA_ILK,
4405
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4406
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
4407
			      " plane %d, " "cursor: %d\n",
4408
			      plane_wm, cursor_wm);
4409
		enabled |= 1;
4410
	}
4411
 
4412
	if (g4x_compute_wm0(dev, 1,
4413
			    &ironlake_display_wm_info,
4414
			    ILK_LP0_PLANE_LATENCY,
4415
			    &ironlake_cursor_wm_info,
4416
			    ILK_LP0_CURSOR_LATENCY,
4417
			    &plane_wm, &cursor_wm)) {
4418
		I915_WRITE(WM0_PIPEB_ILK,
4419
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4420
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
4421
			      " plane %d, cursor: %d\n",
4422
			      plane_wm, cursor_wm);
4423
		enabled |= 2;
4424
	}
4425
 
4426
	/*
4427
	 * Calculate and update the self-refresh watermark only when one
4428
	 * display plane is used.
4429
	 */
4430
	I915_WRITE(WM3_LP_ILK, 0);
4431
	I915_WRITE(WM2_LP_ILK, 0);
4432
	I915_WRITE(WM1_LP_ILK, 0);
4433
 
4434
	if (!single_plane_enabled(enabled))
4435
		return;
4436
	enabled = ffs(enabled) - 1;
4437
 
4438
	/* WM1 */
4439
	if (!ironlake_compute_srwm(dev, 1, enabled,
4440
				   ILK_READ_WM1_LATENCY() * 500,
4441
				   &ironlake_display_srwm_info,
4442
				   &ironlake_cursor_srwm_info,
4443
				   &fbc_wm, &plane_wm, &cursor_wm))
4444
		return;
4445
 
4446
	I915_WRITE(WM1_LP_ILK,
4447
		   WM1_LP_SR_EN |
4448
		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4449
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
4450
		   (plane_wm << WM1_LP_SR_SHIFT) |
4451
		   cursor_wm);
4452
 
4453
	/* WM2 */
4454
	if (!ironlake_compute_srwm(dev, 2, enabled,
4455
				   ILK_READ_WM2_LATENCY() * 500,
4456
				   &ironlake_display_srwm_info,
4457
				   &ironlake_cursor_srwm_info,
4458
				   &fbc_wm, &plane_wm, &cursor_wm))
4459
		return;
4460
 
4461
	I915_WRITE(WM2_LP_ILK,
4462
		   WM2_LP_EN |
4463
		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4464
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
4465
		   (plane_wm << WM1_LP_SR_SHIFT) |
4466
		   cursor_wm);
4467
 
4468
	/*
4469
	 * WM3 is unsupported on ILK, probably because we don't have latency
4470
	 * data for that power state
4471
	 */
4472
}
4473
 
2342 Serge 4474
void sandybridge_update_wm(struct drm_device *dev)
2327 Serge 4475
{
4476
	struct drm_i915_private *dev_priv = dev->dev_private;
4477
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
4478
	int fbc_wm, plane_wm, cursor_wm;
4479
	unsigned int enabled;
4480
 
4481
	enabled = 0;
4482
	if (g4x_compute_wm0(dev, 0,
4483
			    &sandybridge_display_wm_info, latency,
4484
			    &sandybridge_cursor_wm_info, latency,
4485
			    &plane_wm, &cursor_wm)) {
4486
		I915_WRITE(WM0_PIPEA_ILK,
4487
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4488
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
4489
			      " plane %d, " "cursor: %d\n",
4490
			      plane_wm, cursor_wm);
4491
		enabled |= 1;
4492
	}
4493
 
4494
	if (g4x_compute_wm0(dev, 1,
4495
			    &sandybridge_display_wm_info, latency,
4496
			    &sandybridge_cursor_wm_info, latency,
4497
			    &plane_wm, &cursor_wm)) {
4498
		I915_WRITE(WM0_PIPEB_ILK,
4499
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4500
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
4501
			      " plane %d, cursor: %d\n",
4502
			      plane_wm, cursor_wm);
4503
		enabled |= 2;
4504
	}
4505
 
2342 Serge 4506
	/* IVB has 3 pipes */
4507
	if (IS_IVYBRIDGE(dev) &&
4508
	    g4x_compute_wm0(dev, 2,
4509
			    &sandybridge_display_wm_info, latency,
4510
			    &sandybridge_cursor_wm_info, latency,
4511
			    &plane_wm, &cursor_wm)) {
4512
		I915_WRITE(WM0_PIPEC_IVB,
4513
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4514
		DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
4515
			      " plane %d, cursor: %d\n",
4516
			      plane_wm, cursor_wm);
4517
		enabled |= 3;
4518
	}
4519
 
2327 Serge 4520
	/*
4521
	 * Calculate and update the self-refresh watermark only when one
4522
	 * display plane is used.
4523
	 *
4524
	 * SNB support 3 levels of watermark.
4525
	 *
4526
	 * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
4527
	 * and disabled in the descending order
4528
	 *
4529
	 */
4530
	I915_WRITE(WM3_LP_ILK, 0);
4531
	I915_WRITE(WM2_LP_ILK, 0);
4532
	I915_WRITE(WM1_LP_ILK, 0);
4533
 
2342 Serge 4534
	if (!single_plane_enabled(enabled) ||
4535
	    dev_priv->sprite_scaling_enabled)
2327 Serge 4536
		return;
4537
	enabled = ffs(enabled) - 1;
4538
 
4539
	/* WM1 */
4540
	if (!ironlake_compute_srwm(dev, 1, enabled,
4541
				   SNB_READ_WM1_LATENCY() * 500,
4542
				   &sandybridge_display_srwm_info,
4543
				   &sandybridge_cursor_srwm_info,
4544
				   &fbc_wm, &plane_wm, &cursor_wm))
4545
		return;
4546
 
4547
	I915_WRITE(WM1_LP_ILK,
4548
		   WM1_LP_SR_EN |
4549
		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4550
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
4551
		   (plane_wm << WM1_LP_SR_SHIFT) |
4552
		   cursor_wm);
4553
 
4554
	/* WM2 */
4555
	if (!ironlake_compute_srwm(dev, 2, enabled,
4556
				   SNB_READ_WM2_LATENCY() * 500,
4557
				   &sandybridge_display_srwm_info,
4558
				   &sandybridge_cursor_srwm_info,
4559
				   &fbc_wm, &plane_wm, &cursor_wm))
4560
		return;
4561
 
4562
	I915_WRITE(WM2_LP_ILK,
4563
		   WM2_LP_EN |
4564
		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4565
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
4566
		   (plane_wm << WM1_LP_SR_SHIFT) |
4567
		   cursor_wm);
4568
 
4569
	/* WM3 */
4570
	if (!ironlake_compute_srwm(dev, 3, enabled,
4571
				   SNB_READ_WM3_LATENCY() * 500,
4572
				   &sandybridge_display_srwm_info,
4573
				   &sandybridge_cursor_srwm_info,
4574
				   &fbc_wm, &plane_wm, &cursor_wm))
4575
		return;
4576
 
4577
	I915_WRITE(WM3_LP_ILK,
4578
		   WM3_LP_EN |
4579
		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4580
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
4581
		   (plane_wm << WM1_LP_SR_SHIFT) |
4582
		   cursor_wm);
2342 Serge 4583
}
2336 Serge 4584
 
2342 Serge 4585
static bool
4586
sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
4587
			      uint32_t sprite_width, int pixel_size,
4588
			      const struct intel_watermark_params *display,
4589
			      int display_latency_ns, int *sprite_wm)
4590
{
4591
	struct drm_crtc *crtc;
4592
	int clock;
4593
	int entries, tlb_miss;
2336 Serge 4594
 
2342 Serge 4595
	crtc = intel_get_crtc_for_plane(dev, plane);
4596
	if (crtc->fb == NULL || !crtc->enabled) {
4597
		*sprite_wm = display->guard_size;
4598
		return false;
4599
	}
4600
 
4601
	clock = crtc->mode.clock;
4602
 
4603
	/* Use the small buffer method to calculate the sprite watermark */
4604
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
4605
	tlb_miss = display->fifo_size*display->cacheline_size -
4606
		sprite_width * 8;
4607
	if (tlb_miss > 0)
4608
		entries += tlb_miss;
4609
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
4610
	*sprite_wm = entries + display->guard_size;
4611
	if (*sprite_wm > (int)display->max_wm)
4612
		*sprite_wm = display->max_wm;
4613
 
4614
	return true;
2327 Serge 4615
}
4616
 
2342 Serge 4617
static bool
4618
sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
4619
				uint32_t sprite_width, int pixel_size,
4620
				const struct intel_watermark_params *display,
4621
				int latency_ns, int *sprite_wm)
4622
{
4623
	struct drm_crtc *crtc;
4624
	unsigned long line_time_us;
4625
	int clock;
4626
	int line_count, line_size;
4627
	int small, large;
4628
	int entries;
4629
 
4630
	if (!latency_ns) {
4631
		*sprite_wm = 0;
4632
		return false;
4633
	}
4634
 
4635
	crtc = intel_get_crtc_for_plane(dev, plane);
4636
	clock = crtc->mode.clock;
4637
 
4638
	line_time_us = (sprite_width * 1000) / clock;
4639
	line_count = (latency_ns / line_time_us + 1000) / 1000;
4640
	line_size = sprite_width * pixel_size;
4641
 
4642
	/* Use the minimum of the small and large buffer method for primary */
4643
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4644
	large = line_count * line_size;
4645
 
4646
	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
4647
	*sprite_wm = entries + display->guard_size;
4648
 
4649
	return *sprite_wm > 0x3ff ? false : true;
4650
}
4651
 
4652
static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
4653
					 uint32_t sprite_width, int pixel_size)
4654
{
4655
	struct drm_i915_private *dev_priv = dev->dev_private;
4656
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
4657
	int sprite_wm, reg;
4658
	int ret;
4659
 
4660
	switch (pipe) {
4661
	case 0:
4662
		reg = WM0_PIPEA_ILK;
4663
		break;
4664
	case 1:
4665
		reg = WM0_PIPEB_ILK;
4666
		break;
4667
	case 2:
4668
		reg = WM0_PIPEC_IVB;
4669
		break;
4670
	default:
4671
		return; /* bad pipe */
4672
	}
4673
 
4674
	ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
4675
					    &sandybridge_display_wm_info,
4676
					    latency, &sprite_wm);
4677
	if (!ret) {
4678
		DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
4679
			      pipe);
4680
		return;
4681
	}
4682
 
4683
	I915_WRITE(reg, I915_READ(reg) | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
4684
	DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
4685
 
4686
 
4687
	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
4688
					      pixel_size,
4689
					      &sandybridge_display_srwm_info,
4690
					      SNB_READ_WM1_LATENCY() * 500,
4691
					      &sprite_wm);
4692
	if (!ret) {
4693
		DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
4694
			      pipe);
4695
		return;
4696
	}
4697
	I915_WRITE(WM1S_LP_ILK, sprite_wm);
4698
 
4699
	/* Only IVB has two more LP watermarks for sprite */
4700
	if (!IS_IVYBRIDGE(dev))
4701
		return;
4702
 
4703
	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
4704
					      pixel_size,
4705
					      &sandybridge_display_srwm_info,
4706
					      SNB_READ_WM2_LATENCY() * 500,
4707
					      &sprite_wm);
4708
	if (!ret) {
4709
		DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
4710
			      pipe);
4711
		return;
4712
	}
4713
	I915_WRITE(WM2S_LP_IVB, sprite_wm);
4714
 
4715
	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
4716
					      pixel_size,
4717
					      &sandybridge_display_srwm_info,
4718
					      SNB_READ_WM3_LATENCY() * 500,
4719
					      &sprite_wm);
4720
	if (!ret) {
4721
		DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
4722
			      pipe);
4723
		return;
4724
	}
4725
	I915_WRITE(WM3S_LP_IVB, sprite_wm);
4726
}
4727
 
2327 Serge 4728
/**
4729
 * intel_update_watermarks - update FIFO watermark values based on current modes
4730
 *
4731
 * Calculate watermark values for the various WM regs based on current mode
4732
 * and plane configuration.
4733
 *
4734
 * There are several cases to deal with here:
4735
 *   - normal (i.e. non-self-refresh)
4736
 *   - self-refresh (SR) mode
4737
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
4738
 *   - lines are small relative to FIFO size (buffer can hold more than 2
4739
 *     lines), so need to account for TLB latency
4740
 *
4741
 *   The normal calculation is:
4742
 *     watermark = dotclock * bytes per pixel * latency
4743
 *   where latency is platform & configuration dependent (we assume pessimal
4744
 *   values here).
4745
 *
4746
 *   The SR calculation is:
4747
 *     watermark = (trunc(latency/line time)+1) * surface width *
4748
 *       bytes per pixel
4749
 *   where
4750
 *     line time = htotal / dotclock
4751
 *     surface width = hdisplay for normal plane and 64 for cursor
4752
 *   and latency is assumed to be high, as above.
4753
 *
4754
 * The final value programmed to the register should always be rounded up,
4755
 * and include an extra 2 entries to account for clock crossings.
4756
 *
4757
 * We don't use the sprite, so we can ignore that.  And on Crestline we have
4758
 * to set the non-SR watermarks to 8.
4759
 */
4760
static void intel_update_watermarks(struct drm_device *dev)
4761
{
4762
	struct drm_i915_private *dev_priv = dev->dev_private;
2351 Serge 4763
 
2327 Serge 4764
	if (dev_priv->display.update_wm)
4765
		dev_priv->display.update_wm(dev);
4766
}
4767
 
2342 Serge 4768
void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
4769
				    uint32_t sprite_width, int pixel_size)
4770
{
4771
	struct drm_i915_private *dev_priv = dev->dev_private;
4772
 
4773
	if (dev_priv->display.update_sprite_wm)
4774
		dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
4775
						   pixel_size);
4776
}
4777
 
2327 Serge 4778
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4779
{
2342 Serge 4780
	if (i915_panel_use_ssc >= 0)
4781
		return i915_panel_use_ssc != 0;
4782
	return dev_priv->lvds_use_ssc
2327 Serge 4783
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4784
}
4785
 
4786
/**
4787
 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
4788
 * @crtc: CRTC structure
2342 Serge 4789
 * @mode: requested mode
2327 Serge 4790
 *
4791
 * A pipe may be connected to one or more outputs.  Based on the depth of the
4792
 * attached framebuffer, choose a good color depth to use on the pipe.
4793
 *
4794
 * If possible, match the pipe depth to the fb depth.  In some cases, this
4795
 * isn't ideal, because the connected output supports a lesser or restricted
4796
 * set of depths.  Resolve that here:
4797
 *    LVDS typically supports only 6bpc, so clamp down in that case
4798
 *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
4799
 *    Displays may support a restricted set as well, check EDID and clamp as
4800
 *      appropriate.
2342 Serge 4801
 *    DP may want to dither down to 6bpc to fit larger modes
2327 Serge 4802
 *
4803
 * RETURNS:
4804
 * Dithering requirement (i.e. false if display bpc and pipe bpc match,
4805
 * true if they don't match).
4806
 */
4807
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
2342 Serge 4808
					 unsigned int *pipe_bpp,
4809
					 struct drm_display_mode *mode)
2327 Serge 4810
{
4811
	struct drm_device *dev = crtc->dev;
4812
	struct drm_i915_private *dev_priv = dev->dev_private;
4813
	struct drm_encoder *encoder;
4814
	struct drm_connector *connector;
4815
	unsigned int display_bpc = UINT_MAX, bpc;
4816
 
4817
	/* Walk the encoders & connectors on this crtc, get min bpc */
4818
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
4819
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
4820
 
4821
		if (encoder->crtc != crtc)
4822
			continue;
4823
 
4824
		if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
4825
			unsigned int lvds_bpc;
4826
 
4827
			if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
4828
			    LVDS_A3_POWER_UP)
4829
				lvds_bpc = 8;
4830
			else
4831
				lvds_bpc = 6;
4832
 
4833
			if (lvds_bpc < display_bpc) {
2342 Serge 4834
				DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
2327 Serge 4835
				display_bpc = lvds_bpc;
4836
			}
4837
			continue;
4838
		}
4839
 
4840
		if (intel_encoder->type == INTEL_OUTPUT_EDP) {
4841
			/* Use VBT settings if we have an eDP panel */
4842
			unsigned int edp_bpc = dev_priv->edp.bpp / 3;
4843
 
4844
			if (edp_bpc < display_bpc) {
2342 Serge 4845
				DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
2327 Serge 4846
				display_bpc = edp_bpc;
4847
			}
4848
			continue;
4849
		}
4850
 
4851
		/* Not one of the known troublemakers, check the EDID */
4852
		list_for_each_entry(connector, &dev->mode_config.connector_list,
4853
				    head) {
4854
			if (connector->encoder != encoder)
4855
				continue;
4856
 
4857
			/* Don't use an invalid EDID bpc value */
4858
			if (connector->display_info.bpc &&
4859
			    connector->display_info.bpc < display_bpc) {
2342 Serge 4860
				DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
2327 Serge 4861
				display_bpc = connector->display_info.bpc;
4862
			}
4863
		}
4864
 
4865
		/*
4866
		 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
4867
		 * through, clamp it down.  (Note: >12bpc will be caught below.)
4868
		 */
4869
		if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
4870
			if (display_bpc > 8 && display_bpc < 12) {
2342 Serge 4871
				DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
2327 Serge 4872
				display_bpc = 12;
4873
			} else {
2342 Serge 4874
				DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
2327 Serge 4875
				display_bpc = 8;
4876
			}
4877
		}
4878
	}
4879
 
2342 Serge 4880
	if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
4881
		DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
4882
		display_bpc = 6;
4883
	}
4884
 
2327 Serge 4885
	/*
4886
	 * We could just drive the pipe at the highest bpc all the time and
4887
	 * enable dithering as needed, but that costs bandwidth.  So choose
4888
	 * the minimum value that expresses the full color range of the fb but
4889
	 * also stays within the max display bpc discovered above.
4890
	 */
4891
 
4892
	switch (crtc->fb->depth) {
4893
	case 8:
4894
		bpc = 8; /* since we go through a colormap */
4895
		break;
4896
	case 15:
4897
	case 16:
4898
		bpc = 6; /* min is 18bpp */
4899
		break;
4900
	case 24:
2342 Serge 4901
		bpc = 8;
2327 Serge 4902
		break;
4903
	case 30:
2342 Serge 4904
		bpc = 10;
2327 Serge 4905
		break;
4906
	case 48:
2342 Serge 4907
		bpc = 12;
2327 Serge 4908
		break;
4909
	default:
4910
		DRM_DEBUG("unsupported depth, assuming 24 bits\n");
4911
		bpc = min((unsigned int)8, display_bpc);
4912
		break;
4913
	}
4914
 
2342 Serge 4915
	display_bpc = min(display_bpc, bpc);
4916
 
4917
	DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
2327 Serge 4918
			 bpc, display_bpc);
4919
 
2342 Serge 4920
	*pipe_bpp = display_bpc * 3;
2327 Serge 4921
 
4922
	return display_bpc != bpc;
4923
}
4924
 
4925
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4926
                  struct drm_display_mode *mode,
4927
                  struct drm_display_mode *adjusted_mode,
4928
                  int x, int y,
4929
                  struct drm_framebuffer *old_fb)
4930
{
4931
    struct drm_device *dev = crtc->dev;
4932
    struct drm_i915_private *dev_priv = dev->dev_private;
4933
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4934
    int pipe = intel_crtc->pipe;
4935
    int plane = intel_crtc->plane;
4936
    int refclk, num_connectors = 0;
4937
    intel_clock_t clock, reduced_clock;
4938
    u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
4939
    bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
4940
    bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
4941
    struct drm_mode_config *mode_config = &dev->mode_config;
4942
    struct intel_encoder *encoder;
4943
    const intel_limit_t *limit;
4944
    int ret;
4945
    u32 temp;
4946
    u32 lvds_sync = 0;
4947
 
4948
    list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4949
        if (encoder->base.crtc != crtc)
4950
            continue;
4951
 
4952
        switch (encoder->type) {
4953
        case INTEL_OUTPUT_LVDS:
4954
            is_lvds = true;
4955
            break;
4956
        case INTEL_OUTPUT_SDVO:
4957
        case INTEL_OUTPUT_HDMI:
4958
            is_sdvo = true;
4959
            if (encoder->needs_tv_clock)
4960
                is_tv = true;
4961
            break;
4962
        case INTEL_OUTPUT_DVO:
4963
            is_dvo = true;
4964
            break;
4965
        case INTEL_OUTPUT_TVOUT:
4966
            is_tv = true;
4967
            break;
4968
        case INTEL_OUTPUT_ANALOG:
4969
            is_crt = true;
4970
            break;
4971
        case INTEL_OUTPUT_DISPLAYPORT:
4972
            is_dp = true;
4973
            break;
4974
        }
4975
 
4976
        num_connectors++;
4977
    }
4978
 
4979
    if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4980
        refclk = dev_priv->lvds_ssc_freq * 1000;
4981
        DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
4982
                  refclk / 1000);
4983
    } else if (!IS_GEN2(dev)) {
4984
        refclk = 96000;
4985
    } else {
4986
        refclk = 48000;
4987
    }
4988
 
4989
    /*
4990
     * Returns a set of divisors for the desired target clock with the given
4991
     * refclk, or FALSE.  The returned values represent the clock equation:
4992
     * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
4993
     */
4994
    limit = intel_limit(crtc, refclk);
4995
    ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
4996
    if (!ok) {
4997
        DRM_ERROR("Couldn't find PLL settings for mode!\n");
4998
        return -EINVAL;
4999
    }
5000
 
5001
    /* Ensure that the cursor is valid for the new mode before changing... */
5002
//    intel_crtc_update_cursor(crtc, true);
5003
 
5004
    if (is_lvds && dev_priv->lvds_downclock_avail) {
5005
        has_reduced_clock = limit->find_pll(limit, crtc,
5006
                            dev_priv->lvds_downclock,
5007
                            refclk,
5008
                            &reduced_clock);
5009
        if (has_reduced_clock && (clock.p != reduced_clock.p)) {
5010
            /*
5011
             * If the different P is found, it means that we can't
5012
             * switch the display clock by using the FP0/FP1.
5013
             * In such case we will disable the LVDS downclock
5014
             * feature.
5015
             */
5016
            DRM_DEBUG_KMS("Different P is found for "
5017
                      "LVDS clock/downclock\n");
5018
            has_reduced_clock = 0;
5019
        }
5020
    }
5021
    /* SDVO TV has fixed PLL values depend on its clock range,
5022
       this mirrors vbios setting. */
5023
    if (is_sdvo && is_tv) {
5024
        if (adjusted_mode->clock >= 100000
5025
            && adjusted_mode->clock < 140500) {
5026
            clock.p1 = 2;
5027
            clock.p2 = 10;
5028
            clock.n = 3;
5029
            clock.m1 = 16;
5030
            clock.m2 = 8;
5031
        } else if (adjusted_mode->clock >= 140500
5032
               && adjusted_mode->clock <= 200000) {
5033
            clock.p1 = 1;
5034
            clock.p2 = 10;
5035
            clock.n = 6;
5036
            clock.m1 = 12;
5037
            clock.m2 = 8;
5038
        }
5039
    }
5040
 
5041
    if (IS_PINEVIEW(dev)) {
5042
        fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
5043
        if (has_reduced_clock)
5044
            fp2 = (1 << reduced_clock.n) << 16 |
5045
                reduced_clock.m1 << 8 | reduced_clock.m2;
5046
    } else {
5047
        fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
5048
        if (has_reduced_clock)
5049
            fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
5050
                reduced_clock.m2;
5051
    }
5052
 
5053
    dpll = DPLL_VGA_MODE_DIS;
5054
 
5055
    if (!IS_GEN2(dev)) {
5056
        if (is_lvds)
5057
            dpll |= DPLLB_MODE_LVDS;
5058
        else
5059
            dpll |= DPLLB_MODE_DAC_SERIAL;
5060
        if (is_sdvo) {
5061
            int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5062
            if (pixel_multiplier > 1) {
5063
                if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
5064
                    dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
5065
            }
5066
            dpll |= DPLL_DVO_HIGH_SPEED;
5067
        }
5068
        if (is_dp)
5069
            dpll |= DPLL_DVO_HIGH_SPEED;
5070
 
5071
        /* compute bitmask from p1 value */
5072
        if (IS_PINEVIEW(dev))
5073
            dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
5074
        else {
5075
            dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5076
            if (IS_G4X(dev) && has_reduced_clock)
5077
                dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5078
        }
5079
        switch (clock.p2) {
5080
        case 5:
5081
            dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5082
            break;
5083
        case 7:
5084
            dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5085
            break;
5086
        case 10:
5087
            dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5088
            break;
5089
        case 14:
5090
            dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5091
            break;
5092
        }
5093
        if (INTEL_INFO(dev)->gen >= 4)
5094
            dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
5095
    } else {
5096
        if (is_lvds) {
5097
            dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5098
        } else {
5099
            if (clock.p1 == 2)
5100
                dpll |= PLL_P1_DIVIDE_BY_TWO;
5101
            else
5102
                dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5103
            if (clock.p2 == 4)
5104
                dpll |= PLL_P2_DIVIDE_BY_4;
5105
        }
5106
    }
5107
 
5108
    if (is_sdvo && is_tv)
5109
        dpll |= PLL_REF_INPUT_TVCLKINBC;
5110
    else if (is_tv)
5111
        /* XXX: just matching BIOS for now */
5112
        /*  dpll |= PLL_REF_INPUT_TVCLKINBC; */
5113
        dpll |= 3;
5114
    else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5115
        dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5116
    else
5117
        dpll |= PLL_REF_INPUT_DREFCLK;
5118
 
5119
    /* setup pipeconf */
5120
    pipeconf = I915_READ(PIPECONF(pipe));
5121
 
5122
    /* Set up the display plane register */
5123
    dspcntr = DISPPLANE_GAMMA_ENABLE;
5124
 
5125
    /* Ironlake's plane is forced to pipe, bit 24 is to
5126
       enable color space conversion */
5127
    if (pipe == 0)
5128
        dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
5129
    else
5130
        dspcntr |= DISPPLANE_SEL_PIPE_B;
5131
 
5132
    if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
5133
        /* Enable pixel doubling when the dot clock is > 90% of the (display)
5134
         * core speed.
5135
         *
5136
         * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
5137
         * pipe == 0 check?
5138
         */
5139
        if (mode->clock >
5140
            dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
5141
            pipeconf |= PIPECONF_DOUBLE_WIDE;
5142
        else
5143
            pipeconf &= ~PIPECONF_DOUBLE_WIDE;
5144
    }
5145
 
2342 Serge 5146
	/* default to 8bpc */
5147
	pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
5148
	if (is_dp) {
5149
		if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
5150
			pipeconf |= PIPECONF_BPP_6 |
5151
				    PIPECONF_DITHER_EN |
5152
				    PIPECONF_DITHER_TYPE_SP;
5153
		}
5154
	}
5155
 
2327 Serge 5156
    dpll |= DPLL_VCO_ENABLE;
5157
 
5158
    DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
5159
    drm_mode_debug_printmodeline(mode);
5160
 
5161
    I915_WRITE(FP0(pipe), fp);
5162
    I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
5163
 
5164
    POSTING_READ(DPLL(pipe));
5165
    udelay(150);
5166
 
5167
    /* The LVDS pin pair needs to be on before the DPLLs are enabled.
5168
     * This is an exception to the general rule that mode_set doesn't turn
5169
     * things on.
5170
     */
5171
    if (is_lvds) {
5172
        temp = I915_READ(LVDS);
5173
        temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
5174
        if (pipe == 1) {
5175
            temp |= LVDS_PIPEB_SELECT;
5176
        } else {
5177
            temp &= ~LVDS_PIPEB_SELECT;
5178
        }
5179
        /* set the corresponsding LVDS_BORDER bit */
5180
        temp |= dev_priv->lvds_border_bits;
5181
        /* Set the B0-B3 data pairs corresponding to whether we're going to
5182
         * set the DPLLs for dual-channel mode or not.
5183
         */
5184
        if (clock.p2 == 7)
5185
            temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
5186
        else
5187
            temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
5188
 
5189
        /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
5190
         * appropriately here, but we need to look more thoroughly into how
5191
         * panels behave in the two modes.
5192
         */
5193
        /* set the dithering flag on LVDS as needed */
5194
        if (INTEL_INFO(dev)->gen >= 4) {
5195
            if (dev_priv->lvds_dither)
5196
                temp |= LVDS_ENABLE_DITHER;
5197
            else
5198
                temp &= ~LVDS_ENABLE_DITHER;
5199
        }
5200
        if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5201
            lvds_sync |= LVDS_HSYNC_POLARITY;
5202
        if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
5203
            lvds_sync |= LVDS_VSYNC_POLARITY;
5204
        if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
5205
            != lvds_sync) {
5206
            char flags[2] = "-+";
5207
            DRM_INFO("Changing LVDS panel from "
5208
                 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
5209
                 flags[!(temp & LVDS_HSYNC_POLARITY)],
5210
                 flags[!(temp & LVDS_VSYNC_POLARITY)],
5211
                 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
5212
                 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
5213
            temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5214
            temp |= lvds_sync;
5215
        }
5216
        I915_WRITE(LVDS, temp);
5217
    }
5218
 
5219
    if (is_dp) {
5220
        intel_dp_set_m_n(crtc, mode, adjusted_mode);
5221
    }
5222
 
5223
    I915_WRITE(DPLL(pipe), dpll);
5224
 
5225
    /* Wait for the clocks to stabilize. */
5226
    POSTING_READ(DPLL(pipe));
5227
    udelay(150);
5228
 
5229
    if (INTEL_INFO(dev)->gen >= 4) {
5230
        temp = 0;
5231
        if (is_sdvo) {
5232
            temp = intel_mode_get_pixel_multiplier(adjusted_mode);
5233
            if (temp > 1)
5234
                temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5235
            else
5236
                temp = 0;
5237
        }
5238
        I915_WRITE(DPLL_MD(pipe), temp);
5239
    } else {
5240
        /* The pixel multiplier can only be updated once the
5241
         * DPLL is enabled and the clocks are stable.
5242
         *
5243
         * So write it again.
5244
         */
5245
        I915_WRITE(DPLL(pipe), dpll);
5246
    }
5247
 
5248
    intel_crtc->lowfreq_avail = false;
5249
    if (is_lvds && has_reduced_clock && i915_powersave) {
5250
        I915_WRITE(FP1(pipe), fp2);
5251
        intel_crtc->lowfreq_avail = true;
5252
        if (HAS_PIPE_CXSR(dev)) {
5253
            DRM_DEBUG_KMS("enabling CxSR downclocking\n");
5254
            pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
5255
        }
5256
    } else {
5257
        I915_WRITE(FP1(pipe), fp);
5258
        if (HAS_PIPE_CXSR(dev)) {
5259
            DRM_DEBUG_KMS("disabling CxSR downclocking\n");
5260
            pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
5261
        }
5262
    }
5263
 
2360 Serge 5264
	pipeconf &= ~PIPECONF_INTERLACE_MASK;
2327 Serge 5265
    if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5266
        pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
5267
        /* the chip adds 2 halflines automatically */
5268
        adjusted_mode->crtc_vdisplay -= 1;
5269
        adjusted_mode->crtc_vtotal -= 1;
5270
        adjusted_mode->crtc_vblank_start -= 1;
5271
        adjusted_mode->crtc_vblank_end -= 1;
5272
        adjusted_mode->crtc_vsync_end -= 1;
5273
        adjusted_mode->crtc_vsync_start -= 1;
5274
    } else
2360 Serge 5275
		pipeconf |= PIPECONF_PROGRESSIVE;
2327 Serge 5276
 
5277
    I915_WRITE(HTOTAL(pipe),
5278
           (adjusted_mode->crtc_hdisplay - 1) |
5279
           ((adjusted_mode->crtc_htotal - 1) << 16));
5280
    I915_WRITE(HBLANK(pipe),
5281
           (adjusted_mode->crtc_hblank_start - 1) |
5282
           ((adjusted_mode->crtc_hblank_end - 1) << 16));
5283
    I915_WRITE(HSYNC(pipe),
5284
           (adjusted_mode->crtc_hsync_start - 1) |
5285
           ((adjusted_mode->crtc_hsync_end - 1) << 16));
5286
 
5287
    I915_WRITE(VTOTAL(pipe),
5288
           (adjusted_mode->crtc_vdisplay - 1) |
5289
           ((adjusted_mode->crtc_vtotal - 1) << 16));
5290
    I915_WRITE(VBLANK(pipe),
5291
           (adjusted_mode->crtc_vblank_start - 1) |
5292
           ((adjusted_mode->crtc_vblank_end - 1) << 16));
5293
    I915_WRITE(VSYNC(pipe),
5294
           (adjusted_mode->crtc_vsync_start - 1) |
5295
           ((adjusted_mode->crtc_vsync_end - 1) << 16));
5296
 
5297
    /* pipesrc and dspsize control the size that is scaled from,
5298
     * which should always be the user's requested size.
5299
     */
5300
    I915_WRITE(DSPSIZE(plane),
5301
           ((mode->vdisplay - 1) << 16) |
5302
           (mode->hdisplay - 1));
5303
    I915_WRITE(DSPPOS(plane), 0);
5304
    I915_WRITE(PIPESRC(pipe),
5305
           ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
5306
 
5307
    I915_WRITE(PIPECONF(pipe), pipeconf);
5308
    POSTING_READ(PIPECONF(pipe));
5309
    intel_enable_pipe(dev_priv, pipe, false);
5310
 
5311
    intel_wait_for_vblank(dev, pipe);
5312
 
5313
    I915_WRITE(DSPCNTR(plane), dspcntr);
5314
    POSTING_READ(DSPCNTR(plane));
5315
    intel_enable_plane(dev_priv, plane, pipe);
5316
 
5317
    ret = intel_pipe_set_base(crtc, x, y, old_fb);
5318
 
5319
    intel_update_watermarks(dev);
5320
 
5321
    return ret;
5322
}
5323
 
2342 Serge 5324
/*
5325
 * Initialize reference clocks when the driver loads
5326
 */
5327
void ironlake_init_pch_refclk(struct drm_device *dev)
2327 Serge 5328
{
5329
	struct drm_i915_private *dev_priv = dev->dev_private;
5330
	struct drm_mode_config *mode_config = &dev->mode_config;
5331
	struct intel_encoder *encoder;
5332
	u32 temp;
5333
	bool has_lvds = false;
2342 Serge 5334
	bool has_cpu_edp = false;
5335
	bool has_pch_edp = false;
5336
	bool has_panel = false;
5337
	bool has_ck505 = false;
5338
	bool can_ssc = false;
2327 Serge 5339
 
5340
	/* We need to take the global config into account */
5341
		list_for_each_entry(encoder, &mode_config->encoder_list,
5342
				    base.head) {
5343
			switch (encoder->type) {
5344
			case INTEL_OUTPUT_LVDS:
2342 Serge 5345
			has_panel = true;
2327 Serge 5346
				has_lvds = true;
2342 Serge 5347
			break;
2327 Serge 5348
			case INTEL_OUTPUT_EDP:
2342 Serge 5349
			has_panel = true;
5350
			if (intel_encoder_is_pch_edp(&encoder->base))
5351
				has_pch_edp = true;
5352
			else
5353
				has_cpu_edp = true;
2327 Serge 5354
				break;
5355
			}
5356
		}
2342 Serge 5357
 
5358
	if (HAS_PCH_IBX(dev)) {
5359
		has_ck505 = dev_priv->display_clock_mode;
5360
		can_ssc = has_ck505;
5361
	} else {
5362
		has_ck505 = false;
5363
		can_ssc = true;
2327 Serge 5364
	}
5365
 
2342 Serge 5366
	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
5367
		      has_panel, has_lvds, has_pch_edp, has_cpu_edp,
5368
		      has_ck505);
5369
 
2327 Serge 5370
	/* Ironlake: try to setup display ref clock before DPLL
5371
	 * enabling. This is only under driver's control after
5372
	 * PCH B stepping, previous chipset stepping should be
5373
	 * ignoring this setting.
5374
	 */
5375
	temp = I915_READ(PCH_DREF_CONTROL);
5376
	/* Always enable nonspread source */
5377
	temp &= ~DREF_NONSPREAD_SOURCE_MASK;
2342 Serge 5378
 
5379
	if (has_ck505)
5380
		temp |= DREF_NONSPREAD_CK505_ENABLE;
5381
	else
2327 Serge 5382
	temp |= DREF_NONSPREAD_SOURCE_ENABLE;
2342 Serge 5383
 
5384
	if (has_panel) {
2327 Serge 5385
	temp &= ~DREF_SSC_SOURCE_MASK;
5386
	temp |= DREF_SSC_SOURCE_ENABLE;
5387
 
2342 Serge 5388
		/* SSC must be turned on before enabling the CPU output  */
5389
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5390
			DRM_DEBUG_KMS("Using SSC on panel\n");
5391
			temp |= DREF_SSC1_ENABLE;
5392
		}
2327 Serge 5393
 
2342 Serge 5394
		/* Get SSC going before enabling the outputs */
2327 Serge 5395
			I915_WRITE(PCH_DREF_CONTROL, temp);
5396
			POSTING_READ(PCH_DREF_CONTROL);
5397
			udelay(200);
2342 Serge 5398
 
2327 Serge 5399
		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5400
 
5401
		/* Enable CPU source on CPU attached eDP */
2342 Serge 5402
		if (has_cpu_edp) {
5403
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5404
				DRM_DEBUG_KMS("Using SSC on eDP\n");
2327 Serge 5405
				temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
2342 Serge 5406
			}
2327 Serge 5407
			else
5408
				temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
2342 Serge 5409
		} else
5410
			temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5411
 
5412
		I915_WRITE(PCH_DREF_CONTROL, temp);
5413
		POSTING_READ(PCH_DREF_CONTROL);
5414
		udelay(200);
2327 Serge 5415
		} else {
2342 Serge 5416
		DRM_DEBUG_KMS("Disabling SSC entirely\n");
5417
 
5418
		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5419
 
5420
		/* Turn off CPU output */
5421
		temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5422
 
2327 Serge 5423
		I915_WRITE(PCH_DREF_CONTROL, temp);
5424
		POSTING_READ(PCH_DREF_CONTROL);
5425
		udelay(200);
2342 Serge 5426
 
5427
		/* Turn off the SSC source */
5428
		temp &= ~DREF_SSC_SOURCE_MASK;
5429
		temp |= DREF_SSC_SOURCE_DISABLE;
5430
 
5431
		/* Turn off SSC1 */
5432
		temp &= ~ DREF_SSC1_ENABLE;
5433
 
5434
		I915_WRITE(PCH_DREF_CONTROL, temp);
5435
		POSTING_READ(PCH_DREF_CONTROL);
5436
		udelay(200);
2327 Serge 5437
	}
5438
}
5439
 
2342 Serge 5440
static int ironlake_get_refclk(struct drm_crtc *crtc)
5441
{
5442
	struct drm_device *dev = crtc->dev;
5443
	struct drm_i915_private *dev_priv = dev->dev_private;
5444
	struct intel_encoder *encoder;
5445
	struct drm_mode_config *mode_config = &dev->mode_config;
5446
	struct intel_encoder *edp_encoder = NULL;
5447
	int num_connectors = 0;
5448
	bool is_lvds = false;
5449
 
5450
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5451
		if (encoder->base.crtc != crtc)
5452
			continue;
5453
 
5454
		switch (encoder->type) {
5455
		case INTEL_OUTPUT_LVDS:
5456
			is_lvds = true;
5457
			break;
5458
		case INTEL_OUTPUT_EDP:
5459
			edp_encoder = encoder;
5460
			break;
5461
		}
5462
		num_connectors++;
5463
	}
5464
 
5465
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5466
		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
5467
			      dev_priv->lvds_ssc_freq);
5468
		return dev_priv->lvds_ssc_freq * 1000;
5469
	}
5470
 
5471
	return 120000;
5472
}
5473
 
2327 Serge 5474
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5475
                  struct drm_display_mode *mode,
5476
                  struct drm_display_mode *adjusted_mode,
5477
                  int x, int y,
5478
                  struct drm_framebuffer *old_fb)
5479
{
5480
    struct drm_device *dev = crtc->dev;
5481
    struct drm_i915_private *dev_priv = dev->dev_private;
5482
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5483
    int pipe = intel_crtc->pipe;
5484
    int plane = intel_crtc->plane;
5485
    int refclk, num_connectors = 0;
5486
    intel_clock_t clock, reduced_clock;
5487
    u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
5488
    bool ok, has_reduced_clock = false, is_sdvo = false;
5489
    bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
5490
    struct intel_encoder *has_edp_encoder = NULL;
5491
    struct drm_mode_config *mode_config = &dev->mode_config;
5492
    struct intel_encoder *encoder;
5493
    const intel_limit_t *limit;
5494
    int ret;
5495
    struct fdi_m_n m_n = {0};
5496
    u32 temp;
5497
    u32 lvds_sync = 0;
5498
    int target_clock, pixel_multiplier, lane, link_bw, factor;
5499
    unsigned int pipe_bpp;
5500
    bool dither;
5501
 
2336 Serge 5502
    ENTER();
5503
 
2327 Serge 5504
    list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5505
        if (encoder->base.crtc != crtc)
5506
            continue;
5507
 
5508
        switch (encoder->type) {
5509
        case INTEL_OUTPUT_LVDS:
5510
            is_lvds = true;
5511
            break;
5512
        case INTEL_OUTPUT_SDVO:
5513
        case INTEL_OUTPUT_HDMI:
5514
            is_sdvo = true;
5515
            if (encoder->needs_tv_clock)
5516
                is_tv = true;
5517
            break;
5518
        case INTEL_OUTPUT_TVOUT:
5519
            is_tv = true;
5520
            break;
5521
        case INTEL_OUTPUT_ANALOG:
5522
            is_crt = true;
5523
            break;
5524
        case INTEL_OUTPUT_DISPLAYPORT:
5525
            is_dp = true;
5526
            break;
5527
        case INTEL_OUTPUT_EDP:
5528
            has_edp_encoder = encoder;
5529
            break;
5530
        }
5531
 
5532
        num_connectors++;
5533
    }
5534
 
2342 Serge 5535
	refclk = ironlake_get_refclk(crtc);
2327 Serge 5536
 
5537
    /*
5538
     * Returns a set of divisors for the desired target clock with the given
5539
     * refclk, or FALSE.  The returned values represent the clock equation:
5540
     * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
5541
     */
5542
    limit = intel_limit(crtc, refclk);
5543
    ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
5544
    if (!ok) {
5545
        DRM_ERROR("Couldn't find PLL settings for mode!\n");
5546
        return -EINVAL;
5547
    }
5548
 
5549
    /* Ensure that the cursor is valid for the new mode before changing... */
5550
//    intel_crtc_update_cursor(crtc, true);
5551
 
5552
    if (is_lvds && dev_priv->lvds_downclock_avail) {
5553
        has_reduced_clock = limit->find_pll(limit, crtc,
5554
                            dev_priv->lvds_downclock,
5555
                            refclk,
5556
                            &reduced_clock);
5557
        if (has_reduced_clock && (clock.p != reduced_clock.p)) {
5558
            /*
5559
             * If the different P is found, it means that we can't
5560
             * switch the display clock by using the FP0/FP1.
5561
             * In such case we will disable the LVDS downclock
5562
             * feature.
5563
             */
5564
            DRM_DEBUG_KMS("Different P is found for "
5565
                      "LVDS clock/downclock\n");
5566
            has_reduced_clock = 0;
5567
        }
5568
    }
5569
    /* SDVO TV has fixed PLL values depend on its clock range,
5570
       this mirrors vbios setting. */
5571
    if (is_sdvo && is_tv) {
5572
        if (adjusted_mode->clock >= 100000
5573
            && adjusted_mode->clock < 140500) {
5574
            clock.p1 = 2;
5575
            clock.p2 = 10;
5576
            clock.n = 3;
5577
            clock.m1 = 16;
5578
            clock.m2 = 8;
5579
        } else if (adjusted_mode->clock >= 140500
5580
               && adjusted_mode->clock <= 200000) {
5581
            clock.p1 = 1;
5582
            clock.p2 = 10;
5583
            clock.n = 6;
5584
            clock.m1 = 12;
5585
            clock.m2 = 8;
5586
        }
5587
    }
5588
 
5589
    /* FDI link */
5590
    pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5591
    lane = 0;
5592
    /* CPU eDP doesn't require FDI link, so just set DP M/N
5593
       according to current link config */
5594
    if (has_edp_encoder &&
5595
        !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5596
        target_clock = mode->clock;
5597
        intel_edp_link_config(has_edp_encoder,
5598
                      &lane, &link_bw);
5599
    } else {
5600
        /* [e]DP over FDI requires target mode clock
5601
           instead of link clock */
5602
        if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
5603
            target_clock = mode->clock;
5604
        else
5605
            target_clock = adjusted_mode->clock;
5606
 
5607
        /* FDI is a binary signal running at ~2.7GHz, encoding
5608
         * each output octet as 10 bits. The actual frequency
5609
         * is stored as a divider into a 100MHz clock, and the
5610
         * mode pixel clock is stored in units of 1KHz.
5611
         * Hence the bw of each lane in terms of the mode signal
5612
         * is:
5613
         */
5614
        link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
5615
    }
5616
 
5617
    /* determine panel color depth */
5618
    temp = I915_READ(PIPECONF(pipe));
5619
    temp &= ~PIPE_BPC_MASK;
2342 Serge 5620
	dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
2327 Serge 5621
    switch (pipe_bpp) {
5622
    case 18:
5623
        temp |= PIPE_6BPC;
5624
        break;
5625
    case 24:
5626
        temp |= PIPE_8BPC;
5627
        break;
5628
    case 30:
5629
        temp |= PIPE_10BPC;
5630
        break;
5631
    case 36:
5632
        temp |= PIPE_12BPC;
5633
        break;
5634
    default:
5635
        WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
5636
            pipe_bpp);
5637
        temp |= PIPE_8BPC;
5638
        pipe_bpp = 24;
5639
        break;
5640
    }
5641
 
5642
    intel_crtc->bpp = pipe_bpp;
5643
    I915_WRITE(PIPECONF(pipe), temp);
5644
 
5645
    if (!lane) {
5646
        /*
5647
         * Account for spread spectrum to avoid
5648
         * oversubscribing the link. Max center spread
5649
         * is 2.5%; use 5% for safety's sake.
5650
         */
5651
        u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
5652
        lane = bps / (link_bw * 8) + 1;
5653
    }
5654
 
5655
    intel_crtc->fdi_lanes = lane;
5656
 
5657
    if (pixel_multiplier > 1)
5658
        link_bw *= pixel_multiplier;
5659
    ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
5660
                 &m_n);
5661
 
5662
    fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
5663
    if (has_reduced_clock)
5664
        fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
5665
            reduced_clock.m2;
5666
 
5667
    /* Enable autotuning of the PLL clock (if permissible) */
5668
    factor = 21;
5669
    if (is_lvds) {
5670
        if ((intel_panel_use_ssc(dev_priv) &&
5671
             dev_priv->lvds_ssc_freq == 100) ||
5672
            (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
5673
            factor = 25;
5674
    } else if (is_sdvo && is_tv)
5675
        factor = 20;
5676
 
5677
    if (clock.m < factor * clock.n)
5678
        fp |= FP_CB_TUNE;
5679
 
5680
    dpll = 0;
5681
 
5682
    if (is_lvds)
5683
        dpll |= DPLLB_MODE_LVDS;
5684
    else
5685
        dpll |= DPLLB_MODE_DAC_SERIAL;
5686
    if (is_sdvo) {
5687
        int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5688
        if (pixel_multiplier > 1) {
5689
            dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
5690
        }
5691
        dpll |= DPLL_DVO_HIGH_SPEED;
5692
    }
5693
    if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
5694
        dpll |= DPLL_DVO_HIGH_SPEED;
5695
 
5696
    /* compute bitmask from p1 value */
5697
    dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5698
    /* also FPA1 */
5699
    dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5700
 
5701
    switch (clock.p2) {
5702
    case 5:
5703
        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5704
        break;
5705
    case 7:
5706
        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5707
        break;
5708
    case 10:
5709
        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5710
        break;
5711
    case 14:
5712
        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5713
        break;
5714
    }
5715
 
5716
    if (is_sdvo && is_tv)
5717
        dpll |= PLL_REF_INPUT_TVCLKINBC;
5718
    else if (is_tv)
5719
        /* XXX: just matching BIOS for now */
5720
        /*  dpll |= PLL_REF_INPUT_TVCLKINBC; */
5721
        dpll |= 3;
5722
    else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5723
        dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5724
    else
5725
        dpll |= PLL_REF_INPUT_DREFCLK;
5726
 
5727
    /* setup pipeconf */
5728
    pipeconf = I915_READ(PIPECONF(pipe));
5729
 
5730
    /* Set up the display plane register */
5731
    dspcntr = DISPPLANE_GAMMA_ENABLE;
5732
 
2342 Serge 5733
	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
2327 Serge 5734
    drm_mode_debug_printmodeline(mode);
5735
 
5736
    /* PCH eDP needs FDI, but CPU eDP does not */
2342 Serge 5737
	if (!intel_crtc->no_pll) {
5738
		if (!has_edp_encoder ||
5739
		    intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
2327 Serge 5740
        I915_WRITE(PCH_FP0(pipe), fp);
5741
        I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
5742
 
5743
        POSTING_READ(PCH_DPLL(pipe));
5744
        udelay(150);
5745
    }
2342 Serge 5746
	} else {
5747
		if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) &&
5748
		    fp == I915_READ(PCH_FP0(0))) {
5749
			intel_crtc->use_pll_a = true;
5750
			DRM_DEBUG_KMS("using pipe a dpll\n");
5751
		} else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) &&
5752
			   fp == I915_READ(PCH_FP0(1))) {
5753
			intel_crtc->use_pll_a = false;
5754
			DRM_DEBUG_KMS("using pipe b dpll\n");
5755
		} else {
5756
			DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
5757
			return -EINVAL;
2327 Serge 5758
        }
5759
    }
5760
 
5761
    /* The LVDS pin pair needs to be on before the DPLLs are enabled.
5762
     * This is an exception to the general rule that mode_set doesn't turn
5763
     * things on.
5764
     */
5765
    if (is_lvds) {
5766
        temp = I915_READ(PCH_LVDS);
5767
        temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
2342 Serge 5768
		if (HAS_PCH_CPT(dev)) {
5769
			temp &= ~PORT_TRANS_SEL_MASK;
5770
			temp |= PORT_TRANS_SEL_CPT(pipe);
5771
		} else {
5772
			if (pipe == 1)
2327 Serge 5773
                temp |= LVDS_PIPEB_SELECT;
5774
            else
5775
                temp &= ~LVDS_PIPEB_SELECT;
5776
        }
2342 Serge 5777
 
2327 Serge 5778
        /* set the corresponsding LVDS_BORDER bit */
5779
        temp |= dev_priv->lvds_border_bits;
5780
        /* Set the B0-B3 data pairs corresponding to whether we're going to
5781
         * set the DPLLs for dual-channel mode or not.
5782
         */
5783
        if (clock.p2 == 7)
5784
            temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
5785
        else
5786
            temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
5787
 
5788
        /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
5789
         * appropriately here, but we need to look more thoroughly into how
5790
         * panels behave in the two modes.
5791
         */
5792
        if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5793
            lvds_sync |= LVDS_HSYNC_POLARITY;
5794
        if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
5795
            lvds_sync |= LVDS_VSYNC_POLARITY;
5796
        if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
5797
            != lvds_sync) {
5798
            char flags[2] = "-+";
5799
            DRM_INFO("Changing LVDS panel from "
5800
                 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
5801
                 flags[!(temp & LVDS_HSYNC_POLARITY)],
5802
                 flags[!(temp & LVDS_VSYNC_POLARITY)],
5803
                 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
5804
                 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
5805
            temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5806
            temp |= lvds_sync;
5807
        }
5808
        I915_WRITE(PCH_LVDS, temp);
5809
    }
5810
 
5811
    pipeconf &= ~PIPECONF_DITHER_EN;
5812
    pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
5813
    if ((is_lvds && dev_priv->lvds_dither) || dither) {
5814
        pipeconf |= PIPECONF_DITHER_EN;
2342 Serge 5815
		pipeconf |= PIPECONF_DITHER_TYPE_SP;
2327 Serge 5816
    }
5817
    if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5818
        intel_dp_set_m_n(crtc, mode, adjusted_mode);
5819
    } else {
5820
        /* For non-DP output, clear any trans DP clock recovery setting.*/
5821
        I915_WRITE(TRANSDATA_M1(pipe), 0);
5822
        I915_WRITE(TRANSDATA_N1(pipe), 0);
5823
        I915_WRITE(TRANSDPLINK_M1(pipe), 0);
5824
        I915_WRITE(TRANSDPLINK_N1(pipe), 0);
5825
    }
5826
 
2342 Serge 5827
	if (!intel_crtc->no_pll &&
5828
	    (!has_edp_encoder ||
5829
	     intel_encoder_is_pch_edp(&has_edp_encoder->base))) {
2327 Serge 5830
        I915_WRITE(PCH_DPLL(pipe), dpll);
5831
 
5832
        /* Wait for the clocks to stabilize. */
5833
        POSTING_READ(PCH_DPLL(pipe));
5834
        udelay(150);
5835
 
5836
        /* The pixel multiplier can only be updated once the
5837
         * DPLL is enabled and the clocks are stable.
5838
         *
5839
         * So write it again.
5840
         */
5841
        I915_WRITE(PCH_DPLL(pipe), dpll);
5842
    }
5843
 
5844
    intel_crtc->lowfreq_avail = false;
2342 Serge 5845
	if (!intel_crtc->no_pll) {
2327 Serge 5846
    if (is_lvds && has_reduced_clock && i915_powersave) {
5847
        I915_WRITE(PCH_FP1(pipe), fp2);
5848
        intel_crtc->lowfreq_avail = true;
5849
        if (HAS_PIPE_CXSR(dev)) {
5850
            DRM_DEBUG_KMS("enabling CxSR downclocking\n");
5851
            pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
5852
        }
5853
    } else {
5854
        I915_WRITE(PCH_FP1(pipe), fp);
5855
        if (HAS_PIPE_CXSR(dev)) {
5856
            DRM_DEBUG_KMS("disabling CxSR downclocking\n");
5857
            pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
5858
        }
5859
    }
2342 Serge 5860
	}
2327 Serge 5861
 
2360 Serge 5862
	pipeconf &= ~PIPECONF_INTERLACE_MASK;
2327 Serge 5863
    if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5864
        pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
5865
        /* the chip adds 2 halflines automatically */
5866
        adjusted_mode->crtc_vdisplay -= 1;
5867
        adjusted_mode->crtc_vtotal -= 1;
5868
        adjusted_mode->crtc_vblank_start -= 1;
5869
        adjusted_mode->crtc_vblank_end -= 1;
5870
        adjusted_mode->crtc_vsync_end -= 1;
5871
        adjusted_mode->crtc_vsync_start -= 1;
5872
    } else
2360 Serge 5873
		pipeconf |= PIPECONF_PROGRESSIVE;
2327 Serge 5874
 
5875
    I915_WRITE(HTOTAL(pipe),
5876
           (adjusted_mode->crtc_hdisplay - 1) |
5877
           ((adjusted_mode->crtc_htotal - 1) << 16));
5878
    I915_WRITE(HBLANK(pipe),
5879
           (adjusted_mode->crtc_hblank_start - 1) |
5880
           ((adjusted_mode->crtc_hblank_end - 1) << 16));
5881
    I915_WRITE(HSYNC(pipe),
5882
           (adjusted_mode->crtc_hsync_start - 1) |
5883
           ((adjusted_mode->crtc_hsync_end - 1) << 16));
5884
 
5885
    I915_WRITE(VTOTAL(pipe),
5886
           (adjusted_mode->crtc_vdisplay - 1) |
5887
           ((adjusted_mode->crtc_vtotal - 1) << 16));
5888
    I915_WRITE(VBLANK(pipe),
5889
           (adjusted_mode->crtc_vblank_start - 1) |
5890
           ((adjusted_mode->crtc_vblank_end - 1) << 16));
5891
    I915_WRITE(VSYNC(pipe),
5892
           (adjusted_mode->crtc_vsync_start - 1) |
5893
           ((adjusted_mode->crtc_vsync_end - 1) << 16));
5894
 
5895
    /* pipesrc controls the size that is scaled from, which should
5896
     * always be the user's requested size.
5897
     */
5898
    I915_WRITE(PIPESRC(pipe),
5899
           ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
5900
 
5901
    I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
5902
    I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
5903
    I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
5904
    I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
5905
 
5906
    if (has_edp_encoder &&
5907
        !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5908
        ironlake_set_pll_edp(crtc, adjusted_mode->clock);
5909
    }
5910
 
5911
    I915_WRITE(PIPECONF(pipe), pipeconf);
5912
    POSTING_READ(PIPECONF(pipe));
5913
 
5914
    intel_wait_for_vblank(dev, pipe);
5915
 
5916
    if (IS_GEN5(dev)) {
5917
        /* enable address swizzle for tiling buffer */
5918
        temp = I915_READ(DISP_ARB_CTL);
5919
        I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
5920
    }
5921
 
5922
    I915_WRITE(DSPCNTR(plane), dspcntr);
5923
    POSTING_READ(DSPCNTR(plane));
5924
 
5925
    ret = intel_pipe_set_base(crtc, x, y, old_fb);
5926
 
2336 Serge 5927
    dbgprintf("Set base\n");
5928
 
2327 Serge 5929
    intel_update_watermarks(dev);
5930
 
2336 Serge 5931
    LEAVE();
5932
 
2327 Serge 5933
    return ret;
5934
}
5935
 
2330 Serge 5936
static int intel_crtc_mode_set(struct drm_crtc *crtc,
5937
			       struct drm_display_mode *mode,
5938
			       struct drm_display_mode *adjusted_mode,
5939
			       int x, int y,
5940
			       struct drm_framebuffer *old_fb)
5941
{
5942
	struct drm_device *dev = crtc->dev;
5943
	struct drm_i915_private *dev_priv = dev->dev_private;
5944
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5945
	int pipe = intel_crtc->pipe;
5946
	int ret;
2327 Serge 5947
 
2330 Serge 5948
//	drm_vblank_pre_modeset(dev, pipe);
2336 Serge 5949
    ENTER();
2327 Serge 5950
 
2330 Serge 5951
	ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
5952
					      x, y, old_fb);
2327 Serge 5953
 
2330 Serge 5954
//	drm_vblank_post_modeset(dev, pipe);
2327 Serge 5955
 
2330 Serge 5956
	intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
2336 Serge 5957
    LEAVE();
2327 Serge 5958
 
2330 Serge 5959
	return ret;
5960
}
2327 Serge 5961
 
2342 Serge 5962
static bool intel_eld_uptodate(struct drm_connector *connector,
5963
			       int reg_eldv, uint32_t bits_eldv,
5964
			       int reg_elda, uint32_t bits_elda,
5965
			       int reg_edid)
5966
{
5967
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
5968
	uint8_t *eld = connector->eld;
5969
	uint32_t i;
5970
 
5971
	i = I915_READ(reg_eldv);
5972
	i &= bits_eldv;
5973
 
5974
	if (!eld[0])
5975
		return !i;
5976
 
5977
	if (!i)
5978
		return false;
5979
 
5980
	i = I915_READ(reg_elda);
5981
	i &= ~bits_elda;
5982
	I915_WRITE(reg_elda, i);
5983
 
5984
	for (i = 0; i < eld[2]; i++)
5985
		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
5986
			return false;
5987
 
5988
	return true;
5989
}
5990
 
5991
/* Program the cached ELD (EDID-Like Data, audio capability block) into the
 * G4x audio registers so the HD-audio controller can read it. */
static void g4x_write_eld(struct drm_connector *connector,
			  struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t len;
	uint32_t i;

	/* Pick the ELD-valid bit matching the audio device ID. */
	i = I915_READ(G4X_AUD_VID_DID);

	if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
		eldv = G4X_ELDV_DEVCL_DEVBLC;
	else
		eldv = G4X_ELDV_DEVCTG;

	/* Nothing to do when the hardware already holds this exact ELD. */
	if (intel_eld_uptodate(connector,
			       G4X_AUD_CNTL_ST, eldv,
			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
			       G4X_HDMIW_HDMIEDID))
		return;

	/* Invalidate the stale ELD and rewind the write address before
	 * streaming in the new data. */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i &= ~(eldv | G4X_ELD_ADDR);
	len = (i >> 9) & 0x1f;		/* ELD buffer size */
	I915_WRITE(G4X_AUD_CNTL_ST, i);

	/* Empty ELD: leave the valid bit cleared. */
	if (!eld[0])
		return;

	/* eld[2] is the ELD length in 4-byte units; clamp to hw buffer. */
	len = min_t(uint8_t, eld[2], len);
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));

	/* Set the valid bit only after the full ELD has been written. */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i |= eldv;
	I915_WRITE(G4X_AUD_CNTL_ST, i);
}
6030
 
6031
/* Program the cached ELD into the PCH (Ibex Peak / Cougar Point) audio
 * registers for the pipe driving this connector. */
static void ironlake_write_eld(struct drm_connector *connector,
				     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t i;
	int len;
	int hdmiw_hdmiedid;
	int aud_cntl_st;
	int aud_cntrl_st2;

	/* IBX and CPT PCHs place the audio registers at different offsets. */
	if (HAS_PCH_IBX(connector->dev)) {
		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
		aud_cntl_st = IBX_AUD_CNTL_ST_A;
		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
	} else {
		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
		aud_cntl_st = CPT_AUD_CNTL_ST_A;
		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
	}

	/* Per-pipe register banks are 0x100 apart. */
	i = to_intel_crtc(crtc)->pipe;
	hdmiw_hdmiedid += i * 0x100;
	aud_cntl_st += i * 0x100;

	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));

	i = I915_READ(aud_cntl_st);
	i = (i >> 29) & 0x3;		/* DIP_Port_Select, 0x1 = PortB */
	if (!i) {
		DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
		/* operate blindly on all ports */
		eldv = IBX_ELD_VALIDB;
		eldv |= IBX_ELD_VALIDB << 4;
		eldv |= IBX_ELD_VALIDB << 8;
	} else {
		DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
	}

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
	}

	/* Skip the rewrite when the hardware already holds this ELD. */
	if (intel_eld_uptodate(connector,
			       aud_cntrl_st2, eldv,
			       aud_cntl_st, IBX_ELD_ADDRESS,
			       hdmiw_hdmiedid))
		return;

	/* Invalidate the stale ELD before touching the buffer. */
	i = I915_READ(aud_cntrl_st2);
	i &= ~eldv;
	I915_WRITE(aud_cntrl_st2, i);

	/* Empty ELD: leave the valid bits cleared. */
	if (!eld[0])
		return;

	/* Rewind the ELD write address to the start of the buffer. */
	i = I915_READ(aud_cntl_st);
	i &= ~IBX_ELD_ADDRESS;
	I915_WRITE(aud_cntl_st, i);

	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));

	/* Set the valid bits only after the full ELD has been written. */
	i = I915_READ(aud_cntrl_st2);
	i |= eldv;
	I915_WRITE(aud_cntrl_st2, i);
}
6103
 
6104
void intel_write_eld(struct drm_encoder *encoder,
6105
		     struct drm_display_mode *mode)
6106
{
6107
	struct drm_crtc *crtc = encoder->crtc;
6108
	struct drm_connector *connector;
6109
	struct drm_device *dev = encoder->dev;
6110
	struct drm_i915_private *dev_priv = dev->dev_private;
6111
 
6112
	connector = drm_select_eld(encoder, mode);
6113
	if (!connector)
6114
		return;
6115
 
6116
	DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6117
			 connector->base.id,
6118
			 drm_get_connector_name(connector),
6119
			 connector->encoder->base.id,
6120
			 drm_get_encoder_name(connector->encoder));
6121
 
6122
	connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
6123
 
6124
	if (dev_priv->display.write_eld)
6125
		dev_priv->display.write_eld(connector, crtc);
6126
}
6127
 
2327 Serge 6128
/** Loads the palette/gamma unit for the CRTC with the prepared values */
6129
void intel_crtc_load_lut(struct drm_crtc *crtc)
6130
{
6131
	struct drm_device *dev = crtc->dev;
6132
	struct drm_i915_private *dev_priv = dev->dev_private;
6133
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6134
	int palreg = PALETTE(intel_crtc->pipe);
6135
	int i;
6136
 
6137
	/* The clocks have to be on to load the palette. */
6138
	if (!crtc->enabled)
6139
		return;
6140
 
6141
	/* use legacy palette for Ironlake */
6142
	if (HAS_PCH_SPLIT(dev))
6143
		palreg = LGC_PALETTE(intel_crtc->pipe);
6144
 
6145
	for (i = 0; i < 256; i++) {
6146
		I915_WRITE(palreg + 4 * i,
6147
			   (intel_crtc->lut_r[i] << 16) |
6148
			   (intel_crtc->lut_g[i] << 8) |
6149
			   intel_crtc->lut_b[i]);
6150
	}
6151
}
6152
 
6153
 
6154
 
6155
 
6156
 
6157
 
6158
 
6159
 
6160
 
6161
 
6162
 
6163
 
6164
 
6165
 
6166
 
6167
 
6168
 
6169
 
6170
 
6171
 
6172
 
6173
 
6174
 
6175
 
6176
 
6177
 
6178
 
6179
 
6180
 
6181
 
6182
 
6183
 
6184
 
6185
 
6186
 
6187
 
6188
 
2332 Serge 6189
/** Sets the color ramps on behalf of RandR */
6190
void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
6191
				 u16 blue, int regno)
6192
{
6193
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2327 Serge 6194
 
2332 Serge 6195
	intel_crtc->lut_r[regno] = red >> 8;
6196
	intel_crtc->lut_g[regno] = green >> 8;
6197
	intel_crtc->lut_b[regno] = blue >> 8;
6198
}
2327 Serge 6199
 
2332 Serge 6200
/** Reads back the color ramp entries for RandR */
void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
			     u16 *blue, int regno)
{
	struct intel_crtc *icrtc = to_intel_crtc(crtc);

	/* Expand the stored 8-bit channels back into the high byte of the
	 * 16-bit components RandR expects. */
	*red = icrtc->lut_r[regno] << 8;
	*green = icrtc->lut_g[regno] << 8;
	*blue = icrtc->lut_b[regno] << 8;
}
2327 Serge 6209
 
2330 Serge 6210
/* drm_crtc gamma_set hook: update a range of shadow LUT entries and push
 * the whole table to the hardware. */
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				 u16 *blue, uint32_t start, uint32_t size)
{
	struct intel_crtc *icrtc = to_intel_crtc(crtc);
	int last = (start + size > 256) ? 256 : start + size;
	int i;

	/* Clamp the range to the 256-entry LUT, then refresh the shadow
	 * copy with the high byte of each 16-bit component. */
	for (i = start; i < last; i++) {
		icrtc->lut_r[i] = red[i] >> 8;
		icrtc->lut_g[i] = green[i] >> 8;
		icrtc->lut_b[i] = blue[i] >> 8;
	}

	intel_crtc_load_lut(crtc);
}
2327 Serge 6224
 
2330 Serge 6225
/**
6226
 * Get a pipe with a simple mode set on it for doing load-based monitor
6227
 * detection.
6228
 *
6229
 * It will be up to the load-detect code to adjust the pipe as appropriate for
6230
 * its requirements.  The pipe will be connected to no other encoders.
6231
 *
6232
 * Currently this code will only succeed if there is a pipe with no encoders
6233
 * configured for it.  In the future, it could choose to temporarily disable
6234
 * some outputs to free up a pipe for its use.
6235
 *
6236
 * \return crtc, or NULL if no pipes are available.
6237
 */
2327 Serge 6238
 
2330 Serge 6239
/* VESA 640x480x72Hz mode to set on the pipe.  Used as the fallback by
 * intel_get_load_detect_pipe() when the caller passes no explicit mode. */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
2327 Serge 6244
 
6245
 
6246
 
6247
 
6248
 
2330 Serge 6249
static u32
6250
intel_framebuffer_pitch_for_width(int width, int bpp)
6251
{
6252
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
6253
	return ALIGN(pitch, 64);
6254
}
2327 Serge 6255
 
2330 Serge 6256
static u32
6257
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
6258
{
6259
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
6260
	return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
6261
}
2327 Serge 6262
 
2330 Serge 6263
static struct drm_framebuffer *
6264
intel_framebuffer_create_for_mode(struct drm_device *dev,
6265
				  struct drm_display_mode *mode,
6266
				  int depth, int bpp)
6267
{
6268
	struct drm_i915_gem_object *obj;
2344 Serge 6269
	struct drm_mode_fb_cmd2 mode_cmd;
2327 Serge 6270
 
2330 Serge 6271
//	obj = i915_gem_alloc_object(dev,
6272
//				    intel_framebuffer_size_for_mode(mode, bpp));
6273
//	if (obj == NULL)
6274
		return ERR_PTR(-ENOMEM);
2327 Serge 6275
 
2330 Serge 6276
//	mode_cmd.width = mode->hdisplay;
6277
//	mode_cmd.height = mode->vdisplay;
6278
//	mode_cmd.depth = depth;
6279
//	mode_cmd.bpp = bpp;
6280
//	mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp);
2327 Serge 6281
 
2330 Serge 6282
//	return intel_framebuffer_create(dev, &mode_cmd, obj);
6283
}
2327 Serge 6284
 
2330 Serge 6285
static struct drm_framebuffer *
6286
mode_fits_in_fbdev(struct drm_device *dev,
6287
		   struct drm_display_mode *mode)
6288
{
6289
	struct drm_i915_private *dev_priv = dev->dev_private;
6290
	struct drm_i915_gem_object *obj;
6291
	struct drm_framebuffer *fb;
2327 Serge 6292
 
2330 Serge 6293
//	if (dev_priv->fbdev == NULL)
6294
//		return NULL;
2327 Serge 6295
 
2330 Serge 6296
//	obj = dev_priv->fbdev->ifb.obj;
6297
//	if (obj == NULL)
6298
//		return NULL;
2327 Serge 6299
 
2330 Serge 6300
//	fb = &dev_priv->fbdev->ifb.base;
6301
//	if (fb->pitch < intel_framebuffer_pitch_for_width(mode->hdisplay,
6302
//							  fb->bits_per_pixel))
6303
		return NULL;
2327 Serge 6304
 
2330 Serge 6305
//	if (obj->base.size < mode->vdisplay * fb->pitch)
6306
//		return NULL;
2327 Serge 6307
 
2330 Serge 6308
//	return fb;
6309
}
2327 Serge 6310
 
2330 Serge 6311
/* Find (or temporarily configure) a pipe for load-based monitor detection
 * on the given encoder/connector.  Saves the previous state in *old so
 * intel_release_load_detect_pipe() can undo it.  Returns true when a pipe
 * is ready, false when none could be set up. */
bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
				struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old)
{
	struct intel_crtc *intel_crtc;
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *old_fb;
	int i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		intel_crtc = to_intel_crtc(crtc);
		old->dpms_mode = intel_crtc->dpms_mode;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
			struct drm_encoder_helper_funcs *encoder_funcs;
			struct drm_crtc_helper_funcs *crtc_funcs;

			crtc_funcs = crtc->helper_private;
			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);

			encoder_funcs = encoder->helper_private;
			encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
		}

		return true;
	}

	/* Find an unused one (if possible) */
	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
		i++;
		/* crtc must be in this encoder's possible_crtcs bitmask */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (!possible_crtc->enabled) {
			crtc = possible_crtc;
			break;
		}
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		return false;
	}

	/* Borrow the free crtc; release undoes this via load_detect_temp. */
	encoder->crtc = crtc;
	connector->encoder = encoder;

	intel_crtc = to_intel_crtc(crtc);
	old->dpms_mode = intel_crtc->dpms_mode;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	if (!mode)
		mode = &load_detect_mode;

	old_fb = crtc->fb;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	crtc->fb = mode_fits_in_fbdev(dev, mode);
	if (crtc->fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
		old->release_fb = crtc->fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	/* Allocation failure: restore the original fb and bail out. */
	if (IS_ERR(crtc->fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		crtc->fb = old_fb;
		return false;
	}

	if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		/* Destroy the temporary fb we created, then restore. */
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		crtc->fb = old_fb;
		return false;
	}

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	return true;
}
2327 Serge 6426
 
2330 Serge 6427
void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
6428
				    struct drm_connector *connector,
6429
				    struct intel_load_detect_pipe *old)
6430
{
6431
	struct drm_encoder *encoder = &intel_encoder->base;
6432
	struct drm_device *dev = encoder->dev;
6433
	struct drm_crtc *crtc = encoder->crtc;
6434
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
6435
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
2327 Serge 6436
 
2330 Serge 6437
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6438
		      connector->base.id, drm_get_connector_name(connector),
6439
		      encoder->base.id, drm_get_encoder_name(encoder));
2327 Serge 6440
 
2330 Serge 6441
	if (old->load_detect_temp) {
6442
		connector->encoder = NULL;
6443
		drm_helper_disable_unused_functions(dev);
2327 Serge 6444
 
2330 Serge 6445
		if (old->release_fb)
6446
			old->release_fb->funcs->destroy(old->release_fb);
2327 Serge 6447
 
2330 Serge 6448
		return;
6449
	}
2327 Serge 6450
 
2330 Serge 6451
	/* Switch crtc and encoder back off if necessary */
6452
	if (old->dpms_mode != DRM_MODE_DPMS_ON) {
6453
		encoder_funcs->dpms(encoder, old->dpms_mode);
6454
		crtc_funcs->dpms(crtc, old->dpms_mode);
6455
	}
6456
}
2327 Serge 6457
 
2330 Serge 6458
/* Returns the clock of the currently programmed mode of the given pipe.
 *
 * Reads the DPLL and FP divisor registers back from hardware, decodes the
 * m1/m2/n/p1/p2 divider fields (layouts differ between gen2, Pineview and
 * later parts) and recomputes the dot clock via intel_clock().  Returns 0
 * if the DPLL is in an unrecognized mode.
 */
static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 dpll = I915_READ(DPLL(pipe));
	u32 fp;
	intel_clock_t clock;

	/* FPA1 is selected when the panel is downclocked; pick the divisor
	 * set that is actually in use. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = I915_READ(FP0(pipe));
	else
		fp = I915_READ(FP1(pipe));

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		/* Pineview encodes N as a one-hot bit, hence ffs() - 1 */
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* P1 is stored as a one-hot field on gen3+ */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return 0;
		}

		/* XXX: Handle the 100Mhz refclk */
		intel_clock(dev, 96000, &clock);
	} else {
		/* gen2: LVDS is only ever driven from pipe B */
		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
			clock.p2 = 14;

			if ((dpll & PLL_REF_INPUT_MASK) ==
			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
				/* XXX: might not be 66MHz */
				intel_clock(dev, 66000, &clock);
			} else
				intel_clock(dev, 48000, &clock);
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;

			intel_clock(dev, 48000, &clock);
		}
	}

	/* XXX: It would be nice to validate the clocks, but we can't reuse
	 * i830PllIsValid() because it relies on the xf86_config connector
	 * configuration being accurate, which it isn't necessarily.
	 */

	return clock.dot;
}
2327 Serge 6544
 
2330 Serge 6545
/** Returns the currently programmed mode of the given pipe.
 *
 * Allocates a fresh drm_display_mode (caller frees) and fills it from the
 * pipe timing registers.  All hardware timing fields are stored as value-1,
 * hence the "+ 1" on every decode.  Returns NULL on allocation failure.
 */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct drm_display_mode *mode;
	int htot = I915_READ(HTOTAL(pipe));
	int hsync = I915_READ(HSYNC(pipe));
	int vtot = I915_READ(VTOTAL(pipe));
	int vsync = I915_READ(VSYNC(pipe));

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	mode->clock = intel_crtc_clock_get(dev, crtc);
	/* low 16 bits: active/start, high 16 bits: total/end */
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);
	drm_mode_set_crtcinfo(mode, 0);

	return mode;
}
6577
 
6578
#define GPU_IDLE_TIMEOUT 500 /* ms */
6579
 
6580
 
6581
 
6582
 
6583
#define CRTC_IDLE_TIMEOUT 1000 /* ms */
6584
 
6585
 
6586
 
6587
 
2327 Serge 6588
static void intel_increase_pllclock(struct drm_crtc *crtc)
6589
{
6590
	struct drm_device *dev = crtc->dev;
6591
	drm_i915_private_t *dev_priv = dev->dev_private;
6592
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6593
	int pipe = intel_crtc->pipe;
6594
	int dpll_reg = DPLL(pipe);
6595
	int dpll;
6596
 
2336 Serge 6597
    ENTER();
6598
 
2327 Serge 6599
	if (HAS_PCH_SPLIT(dev))
6600
		return;
6601
 
6602
	if (!dev_priv->lvds_downclock_avail)
6603
		return;
6604
 
6605
	dpll = I915_READ(dpll_reg);
6606
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
6607
		DRM_DEBUG_DRIVER("upclocking LVDS\n");
6608
 
6609
		/* Unlock panel regs */
6610
		I915_WRITE(PP_CONTROL,
6611
			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
6612
 
6613
		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
6614
		I915_WRITE(dpll_reg, dpll);
6615
		intel_wait_for_vblank(dev, pipe);
6616
 
6617
		dpll = I915_READ(dpll_reg);
6618
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
6619
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
6620
 
6621
		/* ...and lock them again */
6622
		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
6623
	}
6624
 
2336 Serge 6625
    LEAVE();
6626
 
2327 Serge 6627
	/* Schedule downclock */
6628
}
6629
 
6630
 
6631
 
6632
 
6633
 
6634
 
6635
 
6636
 
6637
 
6638
 
6639
 
6640
 
6641
 
6642
 
6643
 
6644
 
6645
 
6646
 
6647
 
6648
 
6649
 
6650
 
2330 Serge 6651
/*
 * drm_crtc_funcs.destroy callback: tear down an intel_crtc.  Any pending
 * page-flip unpin work is detached under the event lock before being freed
 * so the (disabled) work handler cannot race with us.
 */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_unpin_work *work;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (work) {
		/* NOTE(port): upstream cancels the workqueue item here;
		 * disabled in this port. */
//		cancel_work_sync(&work->work);
		kfree(work);
	}

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}
2327 Serge 6672
 
6673
 
6674
 
6675
 
6676
 
6677
 
6678
 
6679
 
6680
 
6681
 
6682
 
6683
 
6684
 
6685
 
6686
 
6687
 
6688
 
6689
 
6690
 
6691
 
6692
 
6693
 
6694
 
6695
 
6696
 
6697
 
6698
 
6699
 
6700
 
6701
 
6702
 
6703
 
6704
 
6705
 
6706
 
6707
 
6708
 
6709
 
6710
 
6711
 
6712
 
6713
 
6714
 
6715
 
6716
 
6717
 
6718
 
6719
 
6720
 
6721
 
6722
 
6723
 
6724
 
6725
 
6726
 
6727
 
6728
 
6729
 
6730
 
6731
 
6732
 
6733
 
6734
 
6735
 
6736
 
6737
 
2330 Serge 6738
/*
 * Fix up plane/pipe routing left behind by the BIOS/bootloader: if the
 * display plane we intend to use is enabled but sourced from the other
 * pipe, disable that plane and pipe so our own modeset teardown happens
 * in a well-defined order.  Not needed on PCH platforms.
 */
static void intel_sanitize_modesetting(struct drm_device *dev,
				       int pipe, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg, val;

	if (HAS_PCH_SPLIT(dev))
		return;

	/* Who knows what state these registers were left in by the BIOS or
	 * grub?
	 *
	 * If we leave the registers in a conflicting state (e.g. with the
	 * display plane reading from the other pipe than the one we intend
	 * to use) then when we attempt to teardown the active mode, we will
	 * not disable the pipes and planes in the correct order -- leaving
	 * a plane reading from a disabled pipe and possibly leading to
	 * undefined behaviour.
	 */

	reg = DSPCNTR(plane);
	val = I915_READ(reg);

	if ((val & DISPLAY_PLANE_ENABLE) == 0)
		return;
	if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
		return;

	/* This display plane is active and attached to the other CPU pipe. */
	pipe = !pipe;

	/* Disable the plane and wait for it to stop reading from the pipe. */
	intel_disable_plane(dev_priv, plane, pipe);
	intel_disable_pipe(dev_priv, pipe);
}
2327 Serge 6773
 
2330 Serge 6774
/*
 * drm_crtc_funcs.reset callback: return the crtc's software state to
 * "unknown" and sanitize any conflicting BIOS plane/pipe routing, so the
 * first real modeset starts from a clean slate.
 */
static void intel_crtc_reset(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Reset flags back to the 'unknown' status so that they
	 * will be correctly set on the initial modeset.
	 */
	intel_crtc->dpms_mode = -1;

	/* We need to fix up any BIOS configuration that conflicts with
	 * our expectations.
	 */
	intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
}
2327 Serge 6789
 
2330 Serge 6790
/* CRTC helper vtable.  Deliberately NOT const: .prepare/.commit are filled
 * in at runtime by intel_crtc_init() depending on whether the device is a
 * PCH-split (Ironlake+) or i9xx part. */
static struct drm_crtc_helper_funcs intel_helper_funcs = {
	.dpms = intel_crtc_dpms,
	.mode_fixup = intel_crtc_mode_fixup,
	.mode_set = intel_crtc_mode_set,
	.mode_set_base = intel_pipe_set_base,
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.load_lut = intel_crtc_load_lut,
	.disable = intel_crtc_disable,
};
2327 Serge 6799
 
2330 Serge 6800
/* Core CRTC vtable.  Cursor and page-flip entry points are disabled in
 * this port (left commented to ease future re-enabling). */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.reset = intel_crtc_reset,
//	.cursor_set = intel_crtc_cursor_set,
//	.cursor_move = intel_crtc_cursor_move,
	.gamma_set = intel_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.destroy = intel_crtc_destroy,
//	.page_flip = intel_crtc_page_flip,
};
2327 Serge 6809
 
2330 Serge 6810
/*
 * Allocate and register the intel_crtc for the given hardware pipe:
 * identity gamma LUT, pipe<->plane mapping (swapped on mobile gen3 for
 * FBC), per-generation helper hooks, and initial "force off" state.
 * Silently returns on allocation failure (crtc simply won't exist).
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	int i;

	/* trailing space holds the connector pointer table used by fbdev */
	intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);

	/* start with an identity gamma ramp */
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
	for (i = 0; i < 256; i++) {
		intel_crtc->lut_r[i] = i;
		intel_crtc->lut_g[i] = i;
		intel_crtc->lut_b[i] = i;
	}

	/* Swap pipes & planes for FBC on pre-965 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (IS_MOBILE(dev) && IS_GEN3(dev)) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	intel_crtc_reset(&intel_crtc->base);
	intel_crtc->active = true; /* force the pipe off on setup_init_config */
	intel_crtc->bpp = 24; /* default for pre-Ironlake */

	if (HAS_PCH_SPLIT(dev)) {
		/* IVB pipe C has no dedicated PLL */
		if (pipe == 2 && IS_IVYBRIDGE(dev))
			intel_crtc->no_pll = true;
		intel_helper_funcs.prepare = ironlake_crtc_prepare;
		intel_helper_funcs.commit = ironlake_crtc_commit;
	} else {
		intel_helper_funcs.prepare = i9xx_crtc_prepare;
		intel_helper_funcs.commit = i9xx_crtc_commit;
	}

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_crtc->busy = false;

}
2327 Serge 6861
 
6862
 
6863
 
6864
 
6865
 
6866
 
6867
 
2330 Serge 6868
/*
 * Build the possible_clones bitmask for an encoder: bit N is set when the
 * N-th encoder on the device's list (in list order) has a clone_mask that
 * intersects the given type mask.
 */
static int intel_encoder_clones(struct drm_device *dev, int type_mask)
{
	struct intel_encoder *encoder;
	int index_mask = 0;
	int entry = 0;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		if (type_mask & encoder->clone_mask)
			index_mask |= (1 << entry);
		entry++;
	}

	return index_mask;
}
2327 Serge 6882
 
2330 Serge 6883
/*
 * Report whether port A carries an eDP panel: mobile parts only, the DP_A
 * detect bit must be set, and on Ironlake the eDP-A fuse must not disable it.
 */
static bool has_edp_a(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_MOBILE(dev))
		return false;

	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
		return false;

	/* Ironlake can fuse off eDP on port A */
	if (IS_GEN5(dev) &&
	    (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}
2327 Serge 6899
 
2330 Serge 6900
/*
 * Probe and register every display output on the device.  The probe order
 * (LVDS/eDP first, then CRT, then the SDVO/HDMI/DP ports) matters because
 * several ports share detect registers and multiplexed pins; after probing,
 * each encoder's possible_crtcs/possible_clones masks are derived.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;
	bool has_lvds = false;

    ENTER();

	if (IS_MOBILE(dev) && !IS_I830(dev))
		has_lvds = intel_lvds_init(dev);
	if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
		/* disable the panel fitter on everything but LVDS */
		I915_WRITE(PFIT_CONTROL, 0);
	}

	if (HAS_PCH_SPLIT(dev)) {
		/* port D may carry eDP instead of regular DP */
		dpd_is_edp = intel_dpd_is_edp(dev);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A);

		if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
			intel_dp_init(dev, PCH_DP_D);
	}

	intel_crt_init(dev);

	if (HAS_PCH_SPLIT(dev)) {
		int found;

		if (I915_READ(HDMIB) & PORT_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB);
			if (!found)
				intel_hdmi_init(dev, HDMIB);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B);
		}

		if (I915_READ(HDMIC) & PORT_DETECTED)
			intel_hdmi_init(dev, HDMIC);

		if (I915_READ(HDMID) & PORT_DETECTED)
			intel_hdmi_init(dev, HDMID);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C);

		/* port D as plain DP only if it wasn't claimed as eDP above */
		if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
			intel_dp_init(dev, PCH_DP_D);

	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
		bool found = false;

		if (I915_READ(SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, SDVOB);
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, SDVOB);
			}

			if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
				DRM_DEBUG_KMS("probing DP_B\n");
				intel_dp_init(dev, DP_B);
			}
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, SDVOC);
		}

		if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {

			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, SDVOC);
			}
			if (SUPPORTS_INTEGRATED_DP(dev)) {
				DRM_DEBUG_KMS("probing DP_C\n");
				intel_dp_init(dev, DP_C);
			}
		}

		if (SUPPORTS_INTEGRATED_DP(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED)) {
			DRM_DEBUG_KMS("probing DP_D\n");
			intel_dp_init(dev, DP_D);
		}
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);

	/* NOTE(port): TV-out support disabled in this port */
//   if (SUPPORTS_TV(dev))
//       intel_tv_init(dev);

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(dev, encoder->clone_mask);
	}

	/* disable all the possible outputs/crtcs before entering KMS mode */
//	drm_helper_disable_unused_functions(dev);

	if (HAS_PCH_SPLIT(dev))
		ironlake_init_pch_refclk(dev);

    LEAVE();
}
7013
 
7014
 
7015
 
7016
 
2327 Serge 7017
 
7018
 
7019
 
7020
 
2335 Serge 7021
/* Framebuffer vtable.  destroy/create_handle are stubbed out in this port,
 * so framebuffers registered with it are never torn down through DRM. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
//	.destroy = intel_user_framebuffer_destroy,
//	.create_handle = intel_user_framebuffer_create_handle,
};
2327 Serge 7025
 
2335 Serge 7026
/*
 * Validate a framebuffer request against hardware limits (no Y-tiling,
 * 64-byte stride alignment, supported pixel format) and initialize the
 * intel_framebuffer wrapper around the backing GEM object.
 *
 * Returns 0 on success or -EINVAL / drm_framebuffer_init() error.
 */
int intel_framebuffer_init(struct drm_device *dev,
			   struct intel_framebuffer *intel_fb,
			   struct drm_mode_fb_cmd2 *mode_cmd,
			   struct drm_i915_gem_object *obj)
{
	int ret;

	/* display engine cannot scan out Y-tiled surfaces */
	if (obj->tiling_mode == I915_TILING_Y)
		return -EINVAL;

	/* stride must be 64-byte aligned */
	if (mode_cmd->pitches[0] & 63)
			return -EINVAL;

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_RGB332:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		/* RGB formats are common across chipsets */
		break;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_VYUY:
		break;
	default:
		DRM_ERROR("unsupported pixel format\n");
		return -EINVAL;
	}

	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		return ret;
	}

	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
	intel_fb->obj = obj;
	return 0;
}
2327 Serge 7068
 
7069
 
2360 Serge 7070
/* Mode-config vtable.  Userspace fb creation and output-poll notification
 * are not wired up in this port, hence the NULL hooks. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = NULL /*intel_user_framebuffer_create*/,
	.output_poll_changed = NULL /*intel_fb_output_poll_changed*/,
};
2327 Serge 7074
 
7075
 
7076
 
7077
 
7078
 
7079
 
7080
 
7081
 
7082
 
2330 Serge 7083
/*
 * Request a new render P-state (DRPS) frequency 'val' via the MEMSWCTL
 * mailbox.  Returns false without issuing the command if the previous
 * command is still pending (MEMCTL_CMD_STS set), true once the request
 * has been submitted.
 */
bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	/* encode the frequency-change command, then latch it with STS */
	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}
2327 Serge 7104
 
2330 Serge 7105
/*
 * Enable Ironlake dynamic render P-state (DRPS) management: program the
 * evaluation intervals and thresholds, derive fmin/fmax/fstart from the
 * MEMMODECTL fuse fields, enable software frequency control, and seed the
 * power-accounting counters.  The write ordering follows the hardware
 * bring-up sequence and must not be rearranged.
 */
void ironlake_enable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL);
	u8 fmax, fmin, fstart, vstart;

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	/* voltage for the starting frequency, from the PXVFREQ table */
	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->fmax = fmax; /* IPS callback will increase this */
	dev_priv->fstart = fstart;

	dev_priv->max_delay = fstart;
	dev_priv->min_delay = fmin;
	dev_priv->cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	/* hand frequency control to software */
	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	msleep(1);

	ironlake_set_drps(dev, fstart);

	/* seed the energy/chipset counters used by the IPS accounting;
	 * the timestamp captures are disabled in this port */
	dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
		I915_READ(0x112e0);
//   dev_priv->last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->last_count2 = I915_READ(0x112f4);
//   getrawmonotonic(&dev_priv->last_time2);
}
2327 Serge 7168
 
7169
 
7170
 
7171
 
7172
 
7173
 
7174
 
7175
 
7176
 
7177
 
7178
 
7179
 
2330 Serge 7180
/*
 * Decode a PXVFREQ table entry into a frequency in kHz:
 * bits 21:16 = divider, bits 13:12 = post divider (power of two),
 * bits 2:0 = pre divider (value + 1).  Returns 0 for an unprogrammed
 * entry (pre == 0), avoiding a divide-by-zero.
 *
 * Fix: the shift expression had been truncated by the HTML viewer the
 * source was recovered from; restored per the upstream i915 driver:
 *   freq = (div * 133333) / ((1 << post) * (pre + 1))
 */
static unsigned long intel_pxfreq(u32 vidfreq)
{
	unsigned long freq;
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	freq = ((div * 133333) / ((1<<post) * (pre+1)));

	return freq;
}
2327 Serge 7194
 
2330 Serge 7195
/*
 * Program the Ironlake energy monitor (EMON): load event energy weights,
 * compute per-P-state power weights from the PXVFREQ voltage/frequency
 * table, set the experimentally-derived magic constants, and enable PMON.
 * The resulting LCFUSE correction factor is cached for IPS power reads.
 */
void intel_init_emon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW + (i * 4), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW + (i * 4), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		/* weight ~ V^2 * f, scaled into a byte */
		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	/* pack four byte-weights per 32-bit PXW register */
	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW + (i * 4), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL + (i * 4), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
7265
 
2342 Serge 7266
static bool intel_enable_rc6(struct drm_device *dev)
7267
{
7268
	/*
7269
	 * Respect the kernel parameter if it is set
7270
	 */
7271
	if (i915_enable_rc6 >= 0)
7272
		return i915_enable_rc6;
7273
 
7274
	/*
7275
	 * Disable RC6 on Ironlake
7276
	 */
7277
	if (INTEL_INFO(dev)->gen == 5)
7278
		return 0;
7279
 
7280
	/*
7281
	 * Disable rc6 on Sandybridge
7282
	 */
7283
	if (INTEL_INFO(dev)->gen == 6) {
7284
		DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n");
7285
		return 0;
7286
	}
7287
	DRM_DEBUG_DRIVER("RC6 enabled\n");
7288
	return 1;
7289
}
7290
 
2330 Serge 7291
/*
 * Enable GEN6 (Sandybridge) RPS auto-downclocking and RC6: program the
 * RC sleep-state thresholds, turbo/frequency request registers, and the
 * PCODE mailbox handshake; read back min/max/current frequency (including
 * any overclock headroom reported by the PCU) and unmask the PM interrupts.
 * The register write sequence is the documented bring-up order and must be
 * kept intact.  Runs under struct_mutex with forcewake held.
 */
void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
	u32 pcu_mbox, rc6_mask = 0;
	int cur_freq, min_freq, max_freq;
	int i;

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	I915_WRITE(GEN6_RC_STATE, 0);
	mutex_lock(&dev_priv->dev->struct_mutex);
	gen6_gt_force_wake_get(dev_priv);

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for (i = 0; i < I915_NUM_RINGS; i++)
		I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	if (intel_enable_rc6(dev_priv->dev))
		rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
			GEN6_RC_CTL_RC6_ENABLE;

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	I915_WRITE(GEN6_RPNSWREQ,
		   GEN6_FREQUENCY(10) |
		   GEN6_OFFSET(0) |
		   GEN6_AGGRESSIVE_TURBO);
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   GEN6_FREQUENCY(12));

	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   18 << 24 |
		   6 << 16);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
	I915_WRITE(GEN6_RP_UP_EI, 100000);
	I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	/* hand the minimum-frequency table to the PCU via the mailbox */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");

	I915_WRITE(GEN6_PCODE_DATA, 0);
	I915_WRITE(GEN6_PCODE_MAILBOX,
		   GEN6_PCODE_READY |
		   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");

	min_freq = (rp_state_cap & 0xff0000) >> 16;
	max_freq = rp_state_cap & 0xff;
	cur_freq = (gt_perf_status & 0xff00) >> 8;

	/* Check for overclock support */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
	pcu_mbox = I915_READ(GEN6_PCODE_DATA);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
	if (pcu_mbox & (1<<31)) { /* OC supported */
		max_freq = pcu_mbox & 0xff;
		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
	}

	/* In units of 100MHz */
	dev_priv->max_delay = max_freq;
	dev_priv->min_delay = min_freq;
	dev_priv->cur_delay = cur_freq;

	/* requires MSI enabled */
	I915_WRITE(GEN6_PMIER,
		   GEN6_PM_MBOX_EVENT |
		   GEN6_PM_THERMAL_EVENT |
		   GEN6_PM_RP_DOWN_TIMEOUT |
		   GEN6_PM_RP_UP_THRESHOLD |
		   GEN6_PM_RP_DOWN_THRESHOLD |
		   GEN6_PM_RP_UP_EI_EXPIRED |
		   GEN6_PM_RP_DOWN_EI_EXPIRED);
	/* NOTE(port): rps_lock protection disabled in this port */
//   spin_lock_irq(&dev_priv->rps_lock);
//   WARN_ON(dev_priv->pm_iir != 0);
	I915_WRITE(GEN6_PMIMR, 0);
//   spin_unlock_irq(&dev_priv->rps_lock);
	/* enable all PM interrupts */
	I915_WRITE(GEN6_PMINTRMSK, 0);

	gen6_gt_force_wake_put(dev_priv);
	mutex_unlock(&dev_priv->dev->struct_mutex);
}
7414
 
7415
/*
 * Build the GPU-frequency -> ring(IA)-frequency table and upload it to the
 * PCU, one mailbox write per GPU frequency step between max_delay and
 * min_delay.  The IA frequency is scaled down from the CPU max as the GPU
 * frequency drops; below ~750MHz the lowest ring frequency is used.
 */
void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
	int min_freq = 15;
	int gpu_freq, ia_freq, max_ia_freq;
	int scaling_factor = 180;

	/* NOTE(port): cpufreq query unavailable; assume a 3GHz CPU */
//   max_ia_freq = cpufreq_quick_get_max(0);
	/*
	 * Default to measured freq if none found, PCU will ensure we don't go
	 * over
	 */
//   if (!max_ia_freq)
		max_ia_freq = 3000000; //tsc_khz;

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	mutex_lock(&dev_priv->dev->struct_mutex);

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access.  We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
	     gpu_freq--) {
		int diff = dev_priv->max_delay - gpu_freq;

		/*
		 * For GPU frequencies less than 750MHz, just use the lowest
		 * ring freq.
		 */
		if (gpu_freq < min_freq)
			ia_freq = 800;
		else
			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
		ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);

		I915_WRITE(GEN6_PCODE_DATA,
			   (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
			   gpu_freq);
		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
			   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
			      GEN6_PCODE_READY) == 0, 10)) {
			DRM_ERROR("pcode write of freq table timed out\n");
			continue;
		}
	}

	mutex_unlock(&dev_priv->dev->struct_mutex);
}
7467
 
2327 Serge 7468
/*
 * Ironlake (gen5) display clock gating setup: disable the clock-gating
 * domains that must stay awake for FBC and CxSR, then apply the
 * self-refresh and FBC chicken-bit workarounds from the spec.
 * Register write order follows the hardware documentation.
 */
static void ironlake_init_clock_gating(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

    /* Required for FBC */
    dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
        DPFCRUNIT_CLOCK_GATE_DISABLE |
        DPFDUNIT_CLOCK_GATE_DISABLE;
    /* Required for CxSR */
    dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;

    I915_WRITE(PCH_3DCGDIS0,
           MARIUNIT_CLOCK_GATE_DISABLE |
           SVSMUNIT_CLOCK_GATE_DISABLE);
    I915_WRITE(PCH_3DCGDIS1,
           VFMUNIT_CLOCK_GATE_DISABLE);

    I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

    /*
     * According to the spec the following bits should be set in
     * order to enable memory self-refresh
     * The bit 22/21 of 0x42004
     * The bit 5 of 0x42020
     * The bit 15 of 0x45000
     */
    I915_WRITE(ILK_DISPLAY_CHICKEN2,
           (I915_READ(ILK_DISPLAY_CHICKEN2) |
            ILK_DPARB_GATE | ILK_VSDPFD_FULL));
    I915_WRITE(ILK_DSPCLK_GATE,
           (I915_READ(ILK_DSPCLK_GATE) |
            ILK_DPARB_CLK_GATE));
    I915_WRITE(DISP_ARB_CTL,
           (I915_READ(DISP_ARB_CTL) |
            DISP_FBC_WM_DIS));
    /* Clear all LP watermarks; they are reprogrammed by update_wm later. */
    I915_WRITE(WM3_LP_ILK, 0);
    I915_WRITE(WM2_LP_ILK, 0);
    I915_WRITE(WM1_LP_ILK, 0);

    /*
     * Based on the document from hardware guys the following bits
     * should be set unconditionally in order to enable FBC.
     * The bit 22 of 0x42000
     * The bit 22 of 0x42004
     * The bit 7,8,9 of 0x42020.
     */
    if (IS_IRONLAKE_M(dev)) {
        I915_WRITE(ILK_DISPLAY_CHICKEN1,
               I915_READ(ILK_DISPLAY_CHICKEN1) |
               ILK_FBCQ_DIS);
        I915_WRITE(ILK_DISPLAY_CHICKEN2,
               I915_READ(ILK_DISPLAY_CHICKEN2) |
               ILK_DPARB_GATE);
        I915_WRITE(ILK_DSPCLK_GATE,
               I915_READ(ILK_DSPCLK_GATE) |
               ILK_DPFC_DIS1 |
               ILK_DPFC_DIS2 |
               ILK_CLK_FBC);
    }

    I915_WRITE(ILK_DISPLAY_CHICKEN2,
           I915_READ(ILK_DISPLAY_CHICKEN2) |
           ILK_ELPIN_409_SELECT);
    /* Upper 16 bits are a write-enable mask for the lower 16. */
    I915_WRITE(_3D_CHICKEN2,
           _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
           _3D_CHICKEN2_WM_READ_PIPELINED);
}
7536
 
7537
/*
 * Sandybridge (gen6) clock gating setup: apply the spec-mandated
 * workaround bits for self-refresh/FBC and disable trickle feed on
 * every display plane. Register write order follows the documentation.
 */
static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* Clear LP watermarks; update_wm reprograms them later. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set.  Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE,
		   I915_READ(ILK_DSPCLK_GATE) |
		   ILK_DPARB_CLK_GATE  |
		   ILK_DPFD_CLK_GATE);

	/* Disable trickle feed on each plane and flush the change. */
	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}
7594
 
7595
/*
 * Ivybridge (gen7) clock gating setup: clear LP watermarks, disable
 * VRHUNIT gating, apply the DGMG chicken-bit workarounds, and disable
 * plane trickle feed on every pipe.
 */
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	/* Clear LP watermarks; update_wm reprograms them later. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);

	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* Disable trickle feed on each plane and flush the change. */
	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}
7620
 
7621
/*
 * G4x clock gating setup: keep the render units (VF/GS/CL) and the
 * display overlay units ungated; GM45 additionally needs the DSS unit
 * ungated.
 */
static void g4x_init_clock_gating(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    uint32_t dspclk_gate;

    I915_WRITE(RENCLK_GATE_D1, 0);
    I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
           GS_UNIT_CLOCK_GATE_DISABLE |
           CL_UNIT_CLOCK_GATE_DISABLE);
    I915_WRITE(RAMCLK_GATE_D, 0);
    dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
        OVRUNIT_CLOCK_GATE_DISABLE |
        OVCUNIT_CLOCK_GATE_DISABLE;
    if (IS_GM45(dev))
        dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
    I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
}
7638
 
7639
/*
 * Crestline (965GM) clock gating setup: disable RCC gating, enable full
 * gating everywhere else, and clear the display engine unit control.
 */
static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	/* DEUC is a 16-bit register, hence the 16-bit write. */
	I915_WRITE16(DEUC, 0);
}
7649
 
7650
/*
 * Broadwater (965G) clock gating setup: keep several render units
 * (RCZ/RCC/RCPB/ISC/FBC) ungated.
 */
static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
}
7661
 
7662
/*
 * Gen3 clock gating setup: enable graphics and dot clock gating and
 * allow the PLL to be powered down in D3 via the D_STATE register.
 */
static void gen3_init_clock_gating(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    u32 dstate = I915_READ(D_STATE);

    dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
        DSTATE_DOT_CLOCK_GATING;
    I915_WRITE(D_STATE, dstate);
}
7671
 
7672
/* i85x clock gating setup: keep the SV unit ungated. */
static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
}
7678
 
7679
/* i830 clock gating setup: keep the overlay unit ungated. */
static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
7685
 
7686
/* Ibex Peak PCH clock gating setup (see comment below). */
static void ibx_init_clock_gating(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    /*
     * On Ibex Peak and Cougar Point, we need to disable clock
     * gating for the panel power sequencer or it will fail to
     * start up when no ports are active.
     */
    I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
7697
 
7698
/*
 * Cougar Point PCH clock gating setup: same panel-power-sequencer
 * workaround as Ibex Peak, plus an eDP PPS fix and an FDI auto-train
 * stall workaround per transcoder.
 */
static void cpt_init_clock_gating(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    int pipe;

    /*
     * On Ibex Peak and Cougar Point, we need to disable clock
     * gating for the panel power sequencer or it will fail to
     * start up when no ports are active.
     */
    I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
    I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
           DPLS_EDP_PPS_FIX_DIS);
    /* Without this, mode sets may fail silently on FDI */
    for_each_pipe(pipe)
        I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
}
7715
 
2332 Serge 7716
/*
 * Drop the RC6 render and power context pointers.
 *
 * NOTE(port): the GEM unpin/unreference calls are commented out because
 * those paths are not ported yet, so the underlying objects are leaked
 * here — TODO restore once GEM object management works.
 */
static void ironlake_teardown_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->renderctx) {
//		i915_gem_object_unpin(dev_priv->renderctx);
//		drm_gem_object_unreference(&dev_priv->renderctx->base);
		dev_priv->renderctx = NULL;
	}

	if (dev_priv->pwrctx) {
//		i915_gem_object_unpin(dev_priv->pwrctx);
//		drm_gem_object_unreference(&dev_priv->pwrctx->base);
		dev_priv->pwrctx = NULL;
	}
}
2327 Serge 7732
 
2339 Serge 7733
/*
 * Disable RC6 on Ironlake: if a power context is armed (PWRCTXA set),
 * wake the GPU out of RC6, disarm the context, then tear down the
 * context pages. The RSTDBYCTL force-exit bit is only held while
 * waiting for the render standby status to report "on".
 */
static void ironlake_disable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_READ(PWRCTXA)) {
		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
			 50);

		I915_WRITE(PWRCTXA, 0);
		POSTING_READ(PWRCTXA);

		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
		POSTING_READ(RSTDBYCTL);
	}

	ironlake_teardown_rc6(dev);
}
2332 Serge 7752
 
7753
static int ironlake_setup_rc6(struct drm_device *dev)
7754
{
7755
	struct drm_i915_private *dev_priv = dev->dev_private;
7756
 
7757
	if (dev_priv->renderctx == NULL)
7758
//		dev_priv->renderctx = intel_alloc_context_page(dev);
7759
	if (!dev_priv->renderctx)
7760
		return -ENOMEM;
7761
 
7762
	if (dev_priv->pwrctx == NULL)
7763
//		dev_priv->pwrctx = intel_alloc_context_page(dev);
7764
	if (!dev_priv->pwrctx) {
7765
		ironlake_teardown_rc6(dev);
7766
		return -ENOMEM;
7767
	}
7768
 
7769
	return 0;
7770
}
7771
 
7772
/*
 * Enable RC6 power saving on Ironlake: set up the context pages, then
 * arm the power context via PWRCTXA and clear the RC6 force-exit bit.
 *
 * NOTE(port): the ring-buffer MI_SET_CONTEXT sequence that saves render
 * state (and the wait for it to complete) is compiled out with #if 0
 * because the ring code is not usable here — TODO re-enable when it is.
 */
void ironlake_enable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* rc6 disabled by default due to repeated reports of hanging during
	 * boot and resume.
	 */
	if (!intel_enable_rc6(dev))
		return;

	mutex_lock(&dev->struct_mutex);
	ret = ironlake_setup_rc6(dev);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return;
	}

	/*
	 * GPU can automatically power down the render unit if given a page
	 * to save state.
	 */
#if 0
	ret = BEGIN_LP_RING(6);
	if (ret) {
		ironlake_teardown_rc6(dev);
		mutex_unlock(&dev->struct_mutex);
		return;
	}

	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
	OUT_RING(MI_SET_CONTEXT);
	OUT_RING(dev_priv->renderctx->gtt_offset |
		 MI_MM_SPACE_GTT |
		 MI_SAVE_EXT_STATE_EN |
		 MI_RESTORE_EXT_STATE_EN |
		 MI_RESTORE_INHIBIT);
	OUT_RING(MI_SUSPEND_FLUSH);
	OUT_RING(MI_NOOP);
	OUT_RING(MI_FLUSH);
	ADVANCE_LP_RING();

	/*
	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
	 * does an implicit flush, combined with MI_FLUSH above, it should be
	 * safe to assume that renderctx is valid
	 */
	ret = intel_wait_ring_idle(LP_RING(dev_priv));
	if (ret) {
		DRM_ERROR("failed to enable ironlake power power savings\n");
		ironlake_teardown_rc6(dev);
		mutex_unlock(&dev->struct_mutex);
		return;
	}
#endif

	/* Point the hardware at the power context page and allow RC6. */
	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
	mutex_unlock(&dev->struct_mutex);
}
7832
 
2330 Serge 7833
/*
 * Run the per-chipset clock gating hook, then the optional PCH
 * (southbridge) hook. init_clock_gating is assumed to always be set by
 * intel_init_display(); init_pch_clock_gating may be NULL.
 */
void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->display.init_clock_gating(dev);

	if (dev_priv->display.init_pch_clock_gating)
		dev_priv->display.init_pch_clock_gating(dev);
}
7842
 
2327 Serge 7843
/* Set up chip specific display functions */
/*
 * Populate dev_priv->display with the per-generation function pointers:
 * DPMS/modeset, FBC, display clock query, forcewake, watermark update,
 * FDI link training, ELD (audio) and clock gating hooks. Pure dispatch;
 * the only hardware access is the IVB ECOBUS probe for MT forcewake and
 * the gen5 MLTR_ILK latency read.
 */
static void intel_init_display(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    /* We always want a DPMS function */
    if (HAS_PCH_SPLIT(dev)) {
        dev_priv->display.dpms = ironlake_crtc_dpms;
        dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
        dev_priv->display.update_plane = ironlake_update_plane;
    } else {
        dev_priv->display.dpms = i9xx_crtc_dpms;
        dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
        dev_priv->display.update_plane = i9xx_update_plane;
    }

    if (I915_HAS_FBC(dev)) {
        if (HAS_PCH_SPLIT(dev)) {
            dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
            dev_priv->display.enable_fbc = ironlake_enable_fbc;
            dev_priv->display.disable_fbc = ironlake_disable_fbc;
        } else if (IS_GM45(dev)) {
            dev_priv->display.fbc_enabled = g4x_fbc_enabled;
            dev_priv->display.enable_fbc = g4x_enable_fbc;
            dev_priv->display.disable_fbc = g4x_disable_fbc;
        } else if (IS_CRESTLINE(dev)) {
            dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
            dev_priv->display.enable_fbc = i8xx_enable_fbc;
            dev_priv->display.disable_fbc = i8xx_disable_fbc;
        }
        /* 855GM needs testing */
    }

    /* Returns the core display clock speed */
	if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
        dev_priv->display.get_display_clock_speed =
            i945_get_display_clock_speed;
    else if (IS_I915G(dev))
        dev_priv->display.get_display_clock_speed =
            i915_get_display_clock_speed;
    else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
        dev_priv->display.get_display_clock_speed =
            i9xx_misc_get_display_clock_speed;
    else if (IS_I915GM(dev))
        dev_priv->display.get_display_clock_speed =
            i915gm_get_display_clock_speed;
    else if (IS_I865G(dev))
        dev_priv->display.get_display_clock_speed =
            i865_get_display_clock_speed;
    else if (IS_I85X(dev))
        dev_priv->display.get_display_clock_speed =
            i855_get_display_clock_speed;
    else /* 852, 830 */
        dev_priv->display.get_display_clock_speed =
            i830_get_display_clock_speed;

    /* For FIFO watermark updates */
    if (HAS_PCH_SPLIT(dev)) {
		dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
		dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;

		/* IVB configs may use multi-threaded forcewake */
		if (IS_IVYBRIDGE(dev)) {
			u32	ecobus;

			/* A small trick here - if the bios hasn't configured MT forcewake,
			 * and if the device is in RC6, then force_wake_mt_get will not wake
			 * the device and the ECOBUS read will return zero. Which will be
			 * (correctly) interpreted by the test below as MT forcewake being
			 * disabled.
			 */
			mutex_lock(&dev->struct_mutex);
			__gen6_gt_force_wake_mt_get(dev_priv);
			ecobus = I915_READ_NOTRACE(ECOBUS);
			__gen6_gt_force_wake_mt_put(dev_priv);
			mutex_unlock(&dev->struct_mutex);

			if (ecobus & FORCEWAKE_MT_ENABLE) {
				DRM_DEBUG_KMS("Using MT version of forcewake\n");
				dev_priv->display.force_wake_get =
					__gen6_gt_force_wake_mt_get;
				dev_priv->display.force_wake_put =
					__gen6_gt_force_wake_mt_put;
			}
		}

        if (HAS_PCH_IBX(dev))
            dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
        else if (HAS_PCH_CPT(dev))
            dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;

        if (IS_GEN5(dev)) {
            /* Need a valid self-refresh latency from MLTR to do CxSR. */
            if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
                dev_priv->display.update_wm = ironlake_update_wm;
            else {
                DRM_DEBUG_KMS("Failed to get proper latency. "
                          "Disable CxSR\n");
                dev_priv->display.update_wm = NULL;
            }
            dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
            dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
			dev_priv->display.write_eld = ironlake_write_eld;
        } else if (IS_GEN6(dev)) {
            if (SNB_READ_WM0_LATENCY()) {
                dev_priv->display.update_wm = sandybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
            } else {
                DRM_DEBUG_KMS("Failed to read display plane latency. "
                          "Disable CxSR\n");
                dev_priv->display.update_wm = NULL;
            }
            dev_priv->display.fdi_link_train = gen6_fdi_link_train;
            dev_priv->display.init_clock_gating = gen6_init_clock_gating;
			dev_priv->display.write_eld = ironlake_write_eld;
        } else if (IS_IVYBRIDGE(dev)) {
            /* FIXME: detect B0+ stepping and use auto training */
            dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
            if (SNB_READ_WM0_LATENCY()) {
                dev_priv->display.update_wm = sandybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
            } else {
                DRM_DEBUG_KMS("Failed to read display plane latency. "
                          "Disable CxSR\n");
                dev_priv->display.update_wm = NULL;
            }
            dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
			dev_priv->display.write_eld = ironlake_write_eld;
        } else
            dev_priv->display.update_wm = NULL;
    } else if (IS_PINEVIEW(dev)) {
        if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
                        dev_priv->is_ddr3,
                        dev_priv->fsb_freq,
                        dev_priv->mem_freq)) {
            DRM_INFO("failed to find known CxSR latency "
                 "(found ddr%s fsb freq %d, mem freq %d), "
                 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
                 dev_priv->fsb_freq, dev_priv->mem_freq);
            /* Disable CxSR and never update its watermark again */
            pineview_disable_cxsr(dev);
            dev_priv->display.update_wm = NULL;
        } else
            dev_priv->display.update_wm = pineview_update_wm;
        dev_priv->display.init_clock_gating = gen3_init_clock_gating;
    } else if (IS_G4X(dev)) {
		dev_priv->display.write_eld = g4x_write_eld;
        dev_priv->display.update_wm = g4x_update_wm;
        dev_priv->display.init_clock_gating = g4x_init_clock_gating;
    } else if (IS_GEN4(dev)) {
        dev_priv->display.update_wm = i965_update_wm;
        if (IS_CRESTLINE(dev))
            dev_priv->display.init_clock_gating = crestline_init_clock_gating;
        else if (IS_BROADWATER(dev))
            dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
    } else if (IS_GEN3(dev)) {
        dev_priv->display.update_wm = i9xx_update_wm;
        dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
        dev_priv->display.init_clock_gating = gen3_init_clock_gating;
    } else if (IS_I865G(dev)) {
        dev_priv->display.update_wm = i830_update_wm;
        dev_priv->display.init_clock_gating = i85x_init_clock_gating;
        dev_priv->display.get_fifo_size = i830_get_fifo_size;
    } else if (IS_I85X(dev)) {
        dev_priv->display.update_wm = i9xx_update_wm;
        dev_priv->display.get_fifo_size = i85x_get_fifo_size;
        dev_priv->display.init_clock_gating = i85x_init_clock_gating;
    } else {
        dev_priv->display.update_wm = i830_update_wm;
        dev_priv->display.init_clock_gating = i830_init_clock_gating;
        if (IS_845G(dev))
            dev_priv->display.get_fifo_size = i845_get_fifo_size;
        else
            dev_priv->display.get_fifo_size = i830_get_fifo_size;
    }

    /* Default just returns -ENODEV to indicate unsupported */
//    dev_priv->display.queue_flip = intel_default_queue_flip;

    /* NOTE(port): page-flip queueing is not wired up in this port. */
#if 0
    switch (INTEL_INFO(dev)->gen) {
    case 2:
        dev_priv->display.queue_flip = intel_gen2_queue_flip;
        break;

    case 3:
        dev_priv->display.queue_flip = intel_gen3_queue_flip;
        break;

    case 4:
    case 5:
        dev_priv->display.queue_flip = intel_gen4_queue_flip;
        break;

    case 6:
        dev_priv->display.queue_flip = intel_gen6_queue_flip;
        break;
    case 7:
        dev_priv->display.queue_flip = intel_gen7_queue_flip;
        break;
    }
#endif
}
8046
 
8047
/*
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
 * resume, or other times.  This quirk makes sure that's the case for
 * affected systems.
 */
static void quirk_pipea_force(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    dev_priv->quirks |= QUIRK_PIPEA_FORCE;
    DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
}
8059
 
8060
/*
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
 */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
}
8068
 
8069
/*
 * One entry of the quirk table: a PCI device/subsystem match plus the
 * workaround hook to run when it matches. PCI_ANY_ID in either
 * subsystem field acts as a wildcard (see intel_init_quirks()).
 */
struct intel_quirk {
    int device;                /* PCI device id */
    int subsystem_vendor;      /* subsystem vendor id or PCI_ANY_ID */
    int subsystem_device;      /* subsystem device id or PCI_ANY_ID */
    void (*hook)(struct drm_device *dev);  /* workaround to apply */
};
8075
 
8076
/* Table of machines that need a workaround, scanned by intel_init_quirks(). */
struct intel_quirk intel_quirks[] = {
    /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
    { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
    /* HP Mini needs pipe A force quirk (LP: #322104) */
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },

    /* Thinkpad R31 needs pipe A force quirk */
    { 0x3577, 0x1014, 0x0505, quirk_pipea_force },
    /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
    { 0x2592, 0x1179, 0x0001, quirk_pipea_force },

    /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
    { 0x3577,  0x1014, 0x0513, quirk_pipea_force },
    /* ThinkPad X40 needs pipe A force quirk */

    /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
    { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

    /* 855 & before need to leave pipe A & dpll A up */
    { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
    { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

    /* Lenovo U160 cannot use SSC on LVDS */
    { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

    /* Sony Vaio Y cannot use SSC on LVDS */
    { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
};
8104
 
8105
static void intel_init_quirks(struct drm_device *dev)
8106
{
8107
    struct pci_dev *d = dev->pdev;
8108
    int i;
8109
 
8110
    for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
8111
        struct intel_quirk *q = &intel_quirks[i];
8112
 
8113
        if (d->device == q->device &&
8114
            (d->subsystem_vendor == q->subsystem_vendor ||
8115
             q->subsystem_vendor == PCI_ANY_ID) &&
8116
            (d->subsystem_device == q->subsystem_device ||
8117
             q->subsystem_device == PCI_ANY_ID))
8118
            q->hook(dev);
8119
    }
8120
}
8121
 
2330 Serge 8122
/* Disable the VGA plane that we never use */
/*
 * Puts the legacy VGA plane to sleep: sets the screen-off bit (bit 5)
 * in VGA sequencer register SR01 via direct port I/O, then disables the
 * VGA display plane through the chipset's VGACNTRL register.
 *
 * NOTE(port): vga_get/vga_put arbitration is not available here, so the
 * SR index/data ports are accessed directly with out8/in8.
 */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	u32 vga_reg;

	/* PCH platforms use the CPU-side VGA control register. */
	if (HAS_PCH_SPLIT(dev))
		vga_reg = CPU_VGACNTRL;
	else
		vga_reg = VGACNTRL;

//	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
    out8(VGA_SR_INDEX, 1);
    sr1 = in8(VGA_SR_DATA);
    out8(VGA_SR_DATA,sr1 | 1<<5);
//   vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}
8144
 
2327 Serge 8145
/*
 * Top-level modesetting bring-up: initialize the DRM mode config,
 * apply quirks, install per-generation display hooks, size the mode
 * config limits by GPU generation, create CRTCs/planes, disable the
 * legacy VGA plane, register outputs, and enable clock gating plus
 * generation-specific power features (DRPS/EMON on Ironlake-M,
 * RPS and the ring frequency table on gen6/7).
 *
 * NOTE(port): the GPU idle work/timer setup is commented out because
 * deferred work is not available in this port.
 */
void intel_modeset_init(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
	int i, ret;

    drm_mode_config_init(dev);

    dev->mode_config.min_width = 0;
    dev->mode_config.min_height = 0;

    dev->mode_config.funcs = (void *)&intel_mode_funcs;

    intel_init_quirks(dev);

    intel_init_display(dev);

    /* Maximum framebuffer dimensions grow with GPU generation. */
    if (IS_GEN2(dev)) {
        dev->mode_config.max_width = 2048;
        dev->mode_config.max_height = 2048;
    } else if (IS_GEN3(dev)) {
        dev->mode_config.max_width = 4096;
        dev->mode_config.max_height = 4096;
    } else {
        dev->mode_config.max_width = 8192;
        dev->mode_config.max_height = 8192;
    }
    /* KolibriOS-specific: framebuffer base comes from the bus address. */
    dev->mode_config.fb_base = get_bus_addr();

    DRM_DEBUG_KMS("%d display pipe%s available.\n",
              dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");

    for (i = 0; i < dev_priv->num_pipe; i++) {
        intel_crtc_init(dev, i);
		ret = intel_plane_init(dev, i);
		if (ret)
			DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
    }

    /* Just disable it once at startup */
    i915_disable_vga(dev);
    intel_setup_outputs(dev);

    intel_init_clock_gating(dev);

    if (IS_IRONLAKE_M(dev)) {
        ironlake_enable_drps(dev);
        intel_init_emon(dev);
    }

    if (IS_GEN6(dev) || IS_GEN7(dev)) {
        gen6_enable_rps(dev_priv);
        gen6_update_ring_freq(dev_priv);
    }

//   INIT_WORK(&dev_priv->idle_work, intel_idle_update);
//   setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
//           (unsigned long)dev);
}
2327 Serge 8203
 
2332 Serge 8204
/*
 * GEM-dependent modeset init, run after GEM is up: enables RC6 on
 * Ironlake-M. NOTE(port): overlay setup is not ported yet.
 */
void intel_modeset_gem_init(struct drm_device *dev)
{
	if (IS_IRONLAKE_M(dev))
		ironlake_enable_rc6(dev);

//	intel_setup_overlay(dev);
}
8211
 
8212
 
2330 Serge 8213
/*
 * Return which encoder is currently attached for connector.
 */
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
{
	/* Each intel connector has exactly one attached encoder. */
	return &intel_attached_encoder(connector)->base;
}
8220
 
2330 Serge 8221
/*
 * Record the encoder on the intel connector and mirror the attachment
 * into the DRM core's connector/encoder linkage.
 */
void intel_connector_attach_encoder(struct intel_connector *connector,
				    struct intel_encoder *encoder)
{
	connector->encoder = encoder;
	drm_mode_connector_attach_encoder(&connector->base,
					  &encoder->base);
}
2327 Serge 8228
 
2330 Serge 8229