Subversion Repositories Kolibri OS

Rev

Rev 2344 | Rev 2360 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2327 Serge 1
/*
2
 * Copyright © 2006-2007 Intel Corporation
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21
 * DEALINGS IN THE SOFTWARE.
22
 *
23
 * Authors:
24
 *  Eric Anholt 
25
 */
26
 
27
//#include 
28
#include 
29
//#include 
30
#include 
31
#include 
2330 Serge 32
#include 
2327 Serge 33
//#include 
2342 Serge 34
#include 
2327 Serge 35
#include "drmP.h"
36
#include "intel_drv.h"
2330 Serge 37
#include "i915_drm.h"
2327 Serge 38
#include "i915_drv.h"
2351 Serge 39
#include "i915_trace.h"
2327 Serge 40
#include "drm_dp_helper.h"
41
#include "drm_crtc_helper.h"
42
 
43
phys_addr_t get_bus_addr(void);
44
 
45
/* Return true iff n is a non-zero power of two.
 * Uses the classic n & (n - 1) trick: clearing the lowest set bit of a
 * power of two leaves zero. */
static inline __attribute__((const))
bool is_power_of_2(unsigned long n)
{
    if (n == 0)
        return false;

    return (n & (n - 1)) == 0;
}
50
 
2330 Serge 51
/* Largest errno value encodable in a pointer (Linux ERR_PTR convention). */
#define MAX_ERRNO       4095

/* True when x lies in the top MAX_ERRNO values of the address space,
 * i.e. it is an encoded negative errno rather than a real pointer.
 * NOTE(review): relies on a project-provided unlikely() macro. */
#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)

/* Return non-zero if ptr is an encoded error value (see ERR_PTR). */
static inline long IS_ERR(const void *ptr)
{
    return IS_ERR_VALUE((unsigned long)ptr);
}
59
 
60
/* Encode a negative errno value as a pointer (Linux ERR_PTR convention).
 * The companion IS_ERR() recognizes such pointers. */
static inline void *ERR_PTR(long error)
{
    return (void *)error;
}
64
 
65
 
2327 Serge 66
/* KolibriOS shim for the Linux pci_read_config_word(): read a 16-bit
 * value from the device's PCI config space at offset 'where' via the
 * native PciRead16 service.
 * NOTE(review): always returns 1; upstream Linux returns 0 on success —
 * confirm callers in this port do not test the return value. */
static inline int pci_read_config_word(struct pci_dev *dev, int where,
                    u16 *val)
{
    *val = PciRead16(dev->busnr, dev->devfn, where);
    return 1;
}
72
 
73
 
74
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
75
 
2342 Serge 76
bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
2327 Serge 77
static void intel_update_watermarks(struct drm_device *dev);
78
static void intel_increase_pllclock(struct drm_crtc *crtc);
79
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
80
 
81
/* One candidate PLL configuration: the raw divider values plus the
 * frequencies derived from them (see intel_clock()). Frequencies follow
 * the mode->clock convention (kHz — see IRONLAKE_FDI_FREQ below). */
typedef struct {
    /* given values */
    int n;              /* N (reference) divider */
    int m1, m2;         /* feedback divider components */
    int p1, p2;         /* post divider components */
    /* derived values */
    int dot;            /* resulting dot clock */
    int vco;            /* VCO frequency */
    int m;              /* effective M, computed from m1/m2 */
    int p;              /* effective P = p1 * p2 */
} intel_clock_t;

/* Inclusive [min, max] range for one divider or frequency. */
typedef struct {
    int min, max;
} intel_range_t;

/* P2 selection rule: use p2_slow below dot_limit, p2_fast at or above. */
typedef struct {
    int dot_limit;
    int p2_slow, p2_fast;
} intel_p2_t;

#define INTEL_P2_NUM              2
typedef struct intel_limit intel_limit_t;
/* Per-platform/per-output PLL constraints plus the search strategy used
 * to find dividers that hit a target dot clock. */
struct intel_limit {
    intel_range_t   dot, vco, n, m, m1, m2, p, p1;
    intel_p2_t      p2;
    /* Search routine: (limit, crtc, target, refclk, out best_clock),
     * returns true when a usable configuration was found. */
    bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
              int, int, intel_clock_t *);
};
110
 
111
/* FDI */
112
#define IRONLAKE_FDI_FREQ       2700000 /* in kHz for mode->clock */
113
 
114
static bool
115
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
116
            int target, int refclk, intel_clock_t *best_clock);
117
static bool
118
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
119
            int target, int refclk, intel_clock_t *best_clock);
120
 
121
static bool
122
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
123
              int target, int refclk, intel_clock_t *best_clock);
124
static bool
125
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
126
               int target, int refclk, intel_clock_t *best_clock);
127
 
128
static inline u32 /* units of 100MHz */
129
intel_fdi_link_freq(struct drm_device *dev)
130
{
131
	if (IS_GEN5(dev)) {
132
		struct drm_i915_private *dev_priv = dev->dev_private;
133
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
134
	} else
135
		return 27;
136
}
137
 
138
/* PLL divider limits, i8xx (Gen2) pipes driving DVO/SDVO outputs. */
static const intel_limit_t intel_limits_i8xx_dvo = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 930000, .max = 1400000 },
        .n = { .min = 3, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
	.find_pll = intel_find_best_PLL,
};

/* PLL divider limits, i8xx (Gen2) pipes driving LVDS panels. */
static const intel_limit_t intel_limits_i8xx_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 930000, .max = 1400000 },
        .n = { .min = 3, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};

/* PLL divider limits, i9xx (Gen3) pipes driving SDVO outputs. */
static const intel_limit_t intel_limits_i9xx_sdvo = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 10, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

/* PLL divider limits, i9xx (Gen3) pipes driving LVDS panels. */
static const intel_limit_t intel_limits_i9xx_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 10, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 7, .max = 98 },
        .p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};
193
 
194
 
195
/* PLL divider limits, G4X pipes driving SDVO outputs. */
static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
	.find_pll = intel_g4x_find_best_PLL,
};

/* PLL divider limits, G4X pipes driving HDMI (and analog) outputs. */
static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* PLL divider limits, G4X pipes driving single-channel LVDS.
 * dot_limit = 0 means p2 is fixed regardless of target clock. */
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
	.find_pll = intel_g4x_find_best_PLL,
};

/* PLL divider limits, G4X pipes driving dual-channel LVDS. */
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
	.find_pll = intel_g4x_find_best_PLL,
};

/* PLL divider limits, G4X pipes driving DisplayPort (fixed table lookup). */
static const intel_limit_t intel_limits_g4x_display_port = {
        .dot = { .min = 161670, .max = 227000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 2 },
        .m = { .min = 97, .max = 108 },
        .m1 = { .min = 0x10, .max = 0x12 },
        .m2 = { .min = 0x05, .max = 0x06 },
        .p = { .min = 10, .max = 20 },
        .p1 = { .min = 1, .max = 2},
        .p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
        .find_pll = intel_find_pll_g4x_dp,
};
268
 
269
/* PLL divider limits, Pineview pipes driving SDVO outputs. */
static const intel_limit_t intel_limits_pineview_sdvo = {
        .dot = { .min = 20000, .max = 400000},
        .vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

/* PLL divider limits, Pineview pipes driving LVDS panels. */
static const intel_limit_t intel_limits_pineview_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1700000, .max = 3500000 },
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 7, .max = 112 },
        .p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_find_best_PLL,
};
298
 
299
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
/* PLL divider limits, Ironlake/SNB pipes driving a DAC (analog). */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* PLL divider limits, Ironlake single-channel LVDS (120MHz refclk). */
static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* PLL divider limits, Ironlake dual-channel LVDS (120MHz refclk). */
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* PLL divider limits, Ironlake dual-channel LVDS at 100MHz refclk. */
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* PLL divider limits, Ironlake pipes driving DisplayPort/eDP. */
static const intel_limit_t intel_limits_ironlake_display_port = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000},
        .n = { .min = 1, .max = 2 },
        .m = { .min = 81, .max = 90 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 10, .max = 20 },
        .p1 = { .min = 1, .max = 2},
        .p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
        .find_pll = intel_find_pll_ironlake_dp,
};
388
 
389
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
390
						int refclk)
391
{
392
	struct drm_device *dev = crtc->dev;
393
	struct drm_i915_private *dev_priv = dev->dev_private;
394
	const intel_limit_t *limit;
395
 
396
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
397
		if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
398
		    LVDS_CLKB_POWER_UP) {
399
			/* LVDS dual channel */
400
			if (refclk == 100000)
401
				limit = &intel_limits_ironlake_dual_lvds_100m;
402
			else
403
				limit = &intel_limits_ironlake_dual_lvds;
404
		} else {
405
			if (refclk == 100000)
406
				limit = &intel_limits_ironlake_single_lvds_100m;
407
			else
408
				limit = &intel_limits_ironlake_single_lvds;
409
		}
410
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
411
			HAS_eDP)
412
		limit = &intel_limits_ironlake_display_port;
413
	else
414
		limit = &intel_limits_ironlake_dac;
415
 
416
	return limit;
417
}
418
 
419
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
420
{
421
	struct drm_device *dev = crtc->dev;
422
	struct drm_i915_private *dev_priv = dev->dev_private;
423
	const intel_limit_t *limit;
424
 
425
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
426
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
427
		    LVDS_CLKB_POWER_UP)
428
			/* LVDS with dual channel */
429
			limit = &intel_limits_g4x_dual_channel_lvds;
430
		else
431
			/* LVDS with dual channel */
432
			limit = &intel_limits_g4x_single_channel_lvds;
433
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
434
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
435
		limit = &intel_limits_g4x_hdmi;
436
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
437
		limit = &intel_limits_g4x_sdvo;
2342 Serge 438
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
2327 Serge 439
		limit = &intel_limits_g4x_display_port;
440
	} else /* The option is for other outputs */
441
		limit = &intel_limits_i9xx_sdvo;
442
 
443
	return limit;
444
}
445
 
446
static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
447
{
448
	struct drm_device *dev = crtc->dev;
449
	const intel_limit_t *limit;
450
 
451
	if (HAS_PCH_SPLIT(dev))
452
		limit = intel_ironlake_limit(crtc, refclk);
453
	else if (IS_G4X(dev)) {
454
		limit = intel_g4x_limit(crtc);
455
	} else if (IS_PINEVIEW(dev)) {
456
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
457
			limit = &intel_limits_pineview_lvds;
458
		else
459
			limit = &intel_limits_pineview_sdvo;
460
	} else if (!IS_GEN2(dev)) {
461
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
462
			limit = &intel_limits_i9xx_lvds;
463
		else
464
			limit = &intel_limits_i9xx_sdvo;
465
	} else {
466
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
467
			limit = &intel_limits_i8xx_lvds;
468
		else
469
			limit = &intel_limits_i8xx_dvo;
470
	}
471
	return limit;
472
}
473
 
474
/* m1 is reserved as 0 in Pineview, n is a ring counter */
/* Derive m/p/vco/dot for Pineview from the raw dividers: the single
 * combined M divider lives in m2 (biased by 2), and N divides directly
 * (no +2 bias, unlike intel_clock()). */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / clock->n;
	clock->dot = clock->vco / clock->p;
}
482
 
483
/* Fill in the derived fields (m, p, vco, dot) of *clock from the raw
 * divider values, given the reference clock. M1/M2/N are programmed as
 * (actual - 2), hence the +2 biases; Pineview has its own formula. */
static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
{
	if (IS_PINEVIEW(dev)) {
		pineview_clock(refclk, clock);
		return;
	}
	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / (clock->n + 2);
	clock->dot = clock->vco / clock->p;
}
494
 
495
/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	/* Walk every encoder registered on the device and match those
	 * currently bound to this crtc. */
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->base.crtc == crtc && encoder->type == type)
			return true;

	return false;
}
510
 
511
/* Bail out of the enclosing validity check; the debug message is
 * compiled out in this port. Only usable inside intel_PLL_is_valid(). */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	/* Each raw divider and each derived value must fall inside the
	 * platform's limit ranges; any miss rejects the candidate. */
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
		INTELPllInvalid("p out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");
	/* m1 > m2 is a hardware requirement everywhere except Pineview,
	 * where m1 is unused (always 0). */
	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
		INTELPllInvalid("m1 <= m2\n");
	if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
		INTELPllInvalid("m out of range\n");
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
545
 
546
/* Exhaustive PLL divider search: try every (m1, m2, n, p1) combination
 * within the limit ranges and keep the valid candidate whose dot clock
 * is closest to 'target'. p2 is fixed up front (from LVDS channel state,
 * or from the dot_limit threshold for other outputs). Writes the winner
 * to *best_clock and returns true iff any valid candidate was found.
 * NOTE: iteration order also acts as the tie-breaker between candidates
 * with equal error, so do not reorder the loops. */
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *best_clock)

{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (I915_READ(LVDS)) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel.  We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 is always 0 in Pineview */
			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					/* Keep the candidate closest to the
					 * requested dot clock. */
					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* err started at 'target'; it only shrank if something matched. */
	return (err != target);
}
608
 
609
/* G4X/ILK PLL divider search. Unlike intel_find_best_PLL() this accepts
 * any candidate within ~0.585% of target ((target>>8)+(target>>9)) and,
 * per hardware guidance, prefers the smallest n and the largest m1/m2/p1
 * by iterating n upward and the others downward, shrinking max_n as
 * better candidates are found. Iteration order is load-bearing. */
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		int lvds_reg;

		/* LVDS register lives in the PCH on split designs. */
		if (HAS_PCH_SPLIT(dev))
			lvds_reg = PCH_LVDS;
		else
			lvds_reg = LVDS;
		if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefere larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* Don't bother with larger n
						 * than the current winner's. */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
672
 
673
static bool
674
intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
675
			   int target, int refclk, intel_clock_t *best_clock)
676
{
677
	struct drm_device *dev = crtc->dev;
678
	intel_clock_t clock;
679
 
680
	if (target < 200000) {
681
		clock.n = 1;
682
		clock.p1 = 2;
683
		clock.p2 = 10;
684
		clock.m1 = 12;
685
		clock.m2 = 9;
686
	} else {
687
		clock.n = 2;
688
		clock.p1 = 1;
689
		clock.p2 = 10;
690
		clock.m1 = 14;
691
		clock.m2 = 8;
692
	}
693
	intel_clock(dev, refclk, &clock);
694
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
695
	return true;
696
}
697
 
698
/* DisplayPort has only two frequencies, 162MHz and 270MHz */
699
static bool
700
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
701
		      int target, int refclk, intel_clock_t *best_clock)
702
{
703
	intel_clock_t clock;
704
	if (target < 200000) {
705
		clock.p1 = 2;
706
		clock.p2 = 10;
707
		clock.n = 2;
708
		clock.m1 = 23;
709
		clock.m2 = 8;
710
	} else {
711
		clock.p1 = 1;
712
		clock.p2 = 10;
713
		clock.n = 1;
714
		clock.m1 = 14;
715
		clock.m2 = 2;
716
	}
717
	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
718
	clock.p = (clock.p1 * clock.p2);
719
	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
720
	clock.vco = 0;
721
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
722
	return true;
723
}
724
 
725
/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
 * mode setting code.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipestat_reg = PIPESTAT(pipe);

	/* Clear existing vblank status. Note this will clear any other
	 * sticky status fields as well.
	 *
	 * This races with i915_driver_irq_handler() with the result
	 * that either function could miss a vblank event.  Here it is not
	 * fatal, as we will either wait upon the next vblank interrupt or
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
	 * called during modeset at which time the GPU should be idle and
	 * should *not* be performing page flips and thus not waiting on
	 * vblanks...
	 * Currently, the result of us stealing a vblank from the irq
	 * handler is that a single frame will be skipped during swapbuffers.
	 */
	I915_WRITE(pipestat_reg,
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

	/* Wait for vblank interrupt bit to set */
	/* 50 ms timeout; on timeout we only log, callers proceed anyway. */
	if (wait_for(I915_READ(pipestat_reg) &
		     PIPE_VBLANK_INTERRUPT_STATUS,
		     50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}
760
 
761
/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(pipe);

		/* Wait for the Pipe State to go off */
		/* 100 ms timeout; timeout is logged, not fatal. */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	} else {
		u32 last_line;
		int reg = PIPEDSL(pipe);
		unsigned long timeout = jiffies + msecs_to_jiffies(100);

		/* Wait for the display line to settle */
		/* Poll the scanline counter every 5 ms until two successive
		 * reads agree (pipe stopped) or the 100 ms budget expires. */
		do {
			last_line = I915_READ(reg) & DSL_LINEMASK;
			mdelay(5);
		} while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
			 time_after(timeout, jiffies));
		if (time_after(jiffies, timeout))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	}
}
804
 
805
/* Human-readable name for an on/off state, used in assertion messages. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";

	return "off";
}
809
 
810
/* Only for pre-ILK configs */
811
static void assert_pll(struct drm_i915_private *dev_priv,
812
		       enum pipe pipe, bool state)
813
{
814
	int reg;
815
	u32 val;
816
	bool cur_state;
817
 
818
	reg = DPLL(pipe);
819
	val = I915_READ(reg);
820
	cur_state = !!(val & DPLL_VCO_ENABLE);
821
	WARN(cur_state != state,
822
	     "PLL state assertion failure (expected %s, current %s)\n",
823
	     state_string(state), state_string(cur_state));
824
}
825
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
826
#define assert_pll_disabled(d, p) assert_pll(d, p, false)
827
 
828
/* For ILK+ */
/* Assert that the PCH DPLL feeding this transcoder is in the expected
 * on/off state. On CPT the transcoder->PLL mapping is indirect, so the
 * pipe argument is first remapped through PCH_DPLL_SEL. */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
			   enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32 pch_dpll;

		pch_dpll = I915_READ(PCH_DPLL_SEL);

		/* Make sure the selected PLL is enabled to the transcoder */
		WARN(!((pch_dpll >> (4 * pipe)) & 8),
		     "transcoder %d PLL not enabled\n", pipe);

		/* Convert the transcoder pipe number to a pll pipe number */
		pipe = (pch_dpll >> (4 * pipe)) & 1;
	}

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PCH PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
858
 
859
/* Assert that the pipe's FDI transmitter is in the expected on/off
 * state; WARNs on mismatch. */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 tx_ctl = I915_READ(FDI_TX_CTL(pipe));
	bool cur_state = !!(tx_ctl & FDI_TX_ENABLE);

	WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
875
 
876
/* Assert that the pipe's FDI receiver is in the expected on/off state;
 * WARNs on mismatch. */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 rx_ctl = I915_READ(FDI_RX_CTL(pipe));
	bool cur_state = !!(rx_ctl & FDI_RX_ENABLE);

	WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
892
 
893
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
894
				      enum pipe pipe)
895
{
896
	int reg;
897
	u32 val;
898
 
899
	/* ILK FDI PLL is always enabled */
900
	if (dev_priv->info->gen == 5)
901
		return;
902
 
903
	reg = FDI_TX_CTL(pipe);
904
	val = I915_READ(reg);
905
	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
906
}
907
 
908
static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
909
				      enum pipe pipe)
910
{
911
	int reg;
912
	u32 val;
913
 
914
	reg = FDI_RX_CTL(pipe);
915
	val = I915_READ(reg);
916
	WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
917
}
918
 
919
/* Assert that the panel power-sequencer registers for the panel on this
 * pipe are writable: either the panel is off, or the unlock key is set.
 * Register locations differ between PCH-split and older parts. */
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	int pp_reg, lvds_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		pp_reg = PCH_PP_CONTROL;
		lvds_reg = PCH_LVDS;
	} else {
		pp_reg = PP_CONTROL;
		lvds_reg = LVDS;
	}

	/* Regs count as unlocked if the panel is powered down or the
	 * unlock pattern is present in PP_CONTROL. */
	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
		locked = false;

	/* Figure out which pipe the LVDS panel actually hangs off. */
	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
		panel_pipe = PIPE_B;

	WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
947
 
2342 Serge 948
/* WARN unless PIPECONF enable for @pipe matches the expected @state. */
void assert_pipe(struct drm_i915_private *dev_priv,
			enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & PIPECONF_ENABLE);
	WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}
962
 
963
/* WARN if display @plane is not enabled in its DSPCNTR register. */
static void assert_plane_enabled(struct drm_i915_private *dev_priv,
				 enum plane plane)
{
	int reg;
	u32 val;

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	WARN(!(val & DISPLAY_PLANE_ENABLE),
	     "plane %c assertion failure, should be active but is disabled\n",
	     plane_name(plane));
}
975
 
976
/*
 * WARN if any display plane is still enabled and routed to @pipe.
 * Only meaningful pre-ILK, where planes can select their pipe.
 */
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe)
{
	int reg, i;
	u32 val;
	int cur_pipe;

	/* Planes are fixed to pipes on ILK+ */
	if (HAS_PCH_SPLIT(dev_priv->dev))
		return;

	/* Need to check both planes against the pipe */
	for (i = 0; i < 2; i++) {
		reg = DSPCNTR(i);
		val = I915_READ(reg);
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(i), pipe_name(pipe));
	}
}
998
 
999
/* WARN unless at least one PCH reference-clock source is selected. */
static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
1009
 
1010
/* WARN if the PCH transcoder for @pipe is still enabled. */
static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	int reg;
	u32 val;
	bool enabled;

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	enabled = !!(val & TRANS_ENABLE);
	WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}
1024
 
1025
/*
 * Return true if the DP port whose control value is @val is enabled and
 * feeding @pipe.  On CPT the pipe routing lives in TRANS_DP_CTL (matched
 * against @port_sel); on older PCHs it is encoded in the port register.
 */
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum pipe pipe, u32 port_sel, u32 val)
{
	if ((val & DP_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32	trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
		u32	trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
			return false;
	} else {
		if ((val & DP_PIPE_MASK) != (pipe << 30))
			return false;
	}
	return true;
}
1042
 
1043
/*
 * Return true if the HDMI port whose control value is @val is enabled
 * and routed to @pipe.  Note the argument order: (dev_priv, pipe, val).
 */
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & PORT_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
			return false;
	}
	return true;
}
1058
 
1059
/*
 * Return true if the LVDS port whose control value is @val is enabled
 * and routed to @pipe.  Note the argument order: (dev_priv, pipe, val).
 */
static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & LVDS_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
			return false;
	}
	return true;
}
1074
 
1075
/*
 * Return true if the analog (VGA/ADPA) DAC whose control value is @val is
 * enabled and routed to @pipe.  Note the argument order: (dev_priv, pipe, val).
 */
static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & ADPA_DAC_ENABLE) == 0)
		return false;
	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
			return false;
	}
	return true;
}
1089
 
1090
/* WARN if the PCH DP port at @reg (matched via @port_sel) drives @pipe. */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);
	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));
}
1098
 
1099
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1100
				     enum pipe pipe, int reg)
1101
{
1102
	u32 val = I915_READ(reg);
1103
	WARN(hdmi_pipe_enabled(dev_priv, val, pipe),
1104
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1105
	     reg, pipe_name(pipe));
1106
}
1107
 
1108
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1109
				      enum pipe pipe)
1110
{
1111
	int reg;
1112
	u32 val;
1113
 
1114
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1115
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1116
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1117
 
1118
	reg = PCH_ADPA;
1119
	val = I915_READ(reg);
1120
	WARN(adpa_pipe_enabled(dev_priv, val, pipe),
1121
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
1122
	     pipe_name(pipe));
1123
 
1124
	reg = PCH_LVDS;
1125
	val = I915_READ(reg);
1126
	WARN(lvds_pipe_enabled(dev_priv, val, pipe),
1127
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
1128
	     pipe_name(pipe));
1129
 
1130
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
1131
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
1132
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
1133
}
1134
 
1135
/**
 * intel_enable_pll - enable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
 * make sure the PLL reg is writable first though, since the panel write
 * protect mechanism may be enabled.
 *
 * Note!  This is for pre-ILK only.
 */
static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
    int reg;
    u32 val;

    /* No really, not for ILK+ */
    BUG_ON(dev_priv->info->gen >= 5);

    /* PLL is protected by panel, make sure we can write it */
    if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
        assert_panel_unlocked(dev_priv, pipe);

    reg = DPLL(pipe);
    val = I915_READ(reg);
    val |= DPLL_VCO_ENABLE;

    /* We do this three times for luck */
    I915_WRITE(reg, val);
    POSTING_READ(reg);
    udelay(150); /* wait for warmup */
    I915_WRITE(reg, val);
    POSTING_READ(reg);
    udelay(150); /* wait for warmup */
    I915_WRITE(reg, val);
    POSTING_READ(reg);
    udelay(150); /* wait for warmup */
}
1173
 
1174
/**
 * intel_disable_pll - disable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe, making sure the pipe is off first.
 *
 * Note!  This is for pre-ILK only.
 */
static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	int reg;
	u32 val;

	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	reg = DPLL(pipe);
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
}
1201
 
1202
/**
 * intel_enable_pch_pll - enable PCH PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
 * drives the transcoder clock.
 */
static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
				 enum pipe pipe)
{
	int reg;
	u32 val;

	/* Only two PCH PLLs exist (A/B); ignore anything else */
	if (pipe > 1)
		return;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* PCH refclock must be enabled first */
	assert_pch_refclk_enabled(dev_priv);

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	val |= DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(200); /* wait for PLL lock */
}
1232
 
1233
/*
 * Disable the PCH PLL for @pipe, unless transcoder C is still using it
 * (the PCH_DPLL_SEL check below).  The transcoder must be off first.
 */
static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	int reg;
	u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
		pll_sel = TRANSC_DPLL_ENABLE;

	/* Only two PCH PLLs exist (A/B); ignore anything else */
	if (pipe > 1)
		return;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* Make sure transcoder isn't still depending on us */
	assert_transcoder_disabled(dev_priv, pipe);

	if (pipe == 0)
		pll_sel |= TRANSC_DPLLA_SEL;
	else if (pipe == 1)
		pll_sel |= TRANSC_DPLLB_SEL;


	/* Leave the PLL running if transcoder C has selected it */
	if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
		return;

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(200);
}
1265
 
1266
/*
 * Enable the PCH transcoder for @pipe.  Requires the PCH DPLL and both
 * FDI TX/RX to be running, since the transcoder is clocked from them.
 */
static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	int reg;
	u32 val;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* Make sure PCH DPLL is enabled */
	assert_pch_pll_enabled(dev_priv, pipe);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);

	if (HAS_PCH_IBX(dev_priv->dev)) {
		/*
		 * make the BPC in transcoder be consistent with
		 * that in pipeconf reg.
		 */
		val &= ~PIPE_BPC_MASK;
		val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
	}
	I915_WRITE(reg, val | TRANS_ENABLE);
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %d\n", pipe);
}
1297
 
1298
/*
 * Disable the PCH transcoder for @pipe.  FDI and all PCH ports must be
 * off first, then we wait for the hardware state bit to clear.
 */
static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	int reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("failed to disable transcoder %d\n", pipe);
}
1319
 
1320
/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @dev_priv: i915 private structure
 * @pipe: pipe to enable
 * @pch_port: on ILK+, is this pipe driving a PCH port or not
 *
 * Enable @pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 *
 * @pipe should be %PIPE_A or %PIPE_B.
 *
 * Will wait until the pipe is actually running (i.e. first vblank) before
 * returning.
 */
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
			      bool pch_port)
{
	int reg;
	u32 val;

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (!HAS_PCH_SPLIT(dev_priv->dev))
		assert_pll_enabled(dev_priv, pipe);
	else {
		if (pch_port) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv, pipe);
			assert_fdi_tx_pll_enabled(dev_priv, pipe);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	/* Already enabled: nothing to do */
	if (val & PIPECONF_ENABLE)
		return;

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}
1364
 
1365
/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @dev_priv: i915 private structure
 * @pipe: pipe to disable
 *
 * Disable @pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
 *
 * @pipe should be %PIPE_A or %PIPE_B.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	int reg;
	u32 val;

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);

	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
		return;

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	/* Already disabled: nothing to do */
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
	intel_wait_for_pipe_off(dev_priv->dev, pipe);
}
1401
 
1402
/*
 * Plane regs are double buffered, going from enabled->disabled needs a
 * trigger in order to latch.  The display address reg provides this.
 */
static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
				      enum plane plane)
{
	/* Rewrite the address regs with their current values to latch changes */
	I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
	I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
}
1412
 
1413
/**
 * intel_enable_plane - enable a display plane on a given pipe
 * @dev_priv: i915 private structure
 * @plane: plane to enable
 * @pipe: pipe being fed
 *
 * Enable @plane on @pipe, making sure that @pipe is running first.
 */
static void intel_enable_plane(struct drm_i915_private *dev_priv,
			       enum plane plane, enum pipe pipe)
{
	int reg;
	u32 val;

	/* If the pipe isn't enabled, we can't pump pixels and may hang */
	assert_pipe_enabled(dev_priv, pipe);

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	/* Already enabled: nothing to do */
	if (val & DISPLAY_PLANE_ENABLE)
		return;

	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
	intel_flush_display_plane(dev_priv, plane);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}
1439
 
1440
/**
 * intel_disable_plane - disable a display plane
 * @dev_priv: i915 private structure
 * @plane: plane to disable
 * @pipe: pipe consuming the data
 *
 * Disable @plane; should be an independent operation.
 */
static void intel_disable_plane(struct drm_i915_private *dev_priv,
				enum plane plane, enum pipe pipe)
{
	int reg;
	u32 val;

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	/* Already disabled: nothing to do */
	if ((val & DISPLAY_PLANE_ENABLE) == 0)
		return;

	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
	intel_flush_display_plane(dev_priv, plane);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}
1463
 
1464
/* Turn off the PCH DP port at @reg if it is currently driving @pipe. */
static void disable_pch_dp(struct drm_i915_private *dev_priv,
			   enum pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);
	if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
		DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
		I915_WRITE(reg, val & ~DP_PORT_EN);
	}
}
1473
 
1474
static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1475
			     enum pipe pipe, int reg)
1476
{
1477
	u32 val = I915_READ(reg);
1478
	if (hdmi_pipe_enabled(dev_priv, val, pipe)) {
1479
		DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
1480
			      reg, pipe);
1481
		I915_WRITE(reg, val & ~PORT_ENABLE);
1482
	}
1483
}
1484
 
1485
/* Disable any ports connected to this transcoder */
1486
static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1487
				    enum pipe pipe)
1488
{
1489
	u32 reg, val;
1490
 
1491
	val = I915_READ(PCH_PP_CONTROL);
1492
	I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
1493
 
1494
	disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1495
	disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1496
	disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1497
 
1498
	reg = PCH_ADPA;
1499
	val = I915_READ(reg);
1500
	if (adpa_pipe_enabled(dev_priv, val, pipe))
1501
		I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
1502
 
1503
	reg = PCH_LVDS;
1504
	val = I915_READ(reg);
1505
	if (lvds_pipe_enabled(dev_priv, val, pipe)) {
1506
		DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
1507
		I915_WRITE(reg, val & ~LVDS_PORT_EN);
1508
		POSTING_READ(reg);
1509
		udelay(100);
1510
	}
1511
 
1512
	disable_pch_hdmi(dev_priv, pipe, HDMIB);
1513
	disable_pch_hdmi(dev_priv, pipe, HDMIC);
1514
	disable_pch_hdmi(dev_priv, pipe, HDMID);
1515
}
1516
 
1517
/* Disable 8xx-style framebuffer compression and wait for it to go idle. */
static void i8xx_disable_fbc(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    u32 fbc_ctl;

    /* Disable compression */
    fbc_ctl = I915_READ(FBC_CONTROL);
    if ((fbc_ctl & FBC_CTL_EN) == 0)
        return;

    fbc_ctl &= ~FBC_CTL_EN;
    I915_WRITE(FBC_CONTROL, fbc_ctl);

    /* Wait for compressing bit to clear */
    if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
        DRM_DEBUG_KMS("FBC idle timed out\n");
        return;
    }

    DRM_DEBUG_KMS("disabled FBC\n");
}
1538
 
1539
/*
 * Enable 8xx-style framebuffer compression for @crtc's framebuffer,
 * recompressing every @interval frames.
 */
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct drm_framebuffer *fb = crtc->fb;
    struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
    struct drm_i915_gem_object *obj = intel_fb->obj;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int cfb_pitch;
    int plane, i;
    u32 fbc_ctl, fbc_ctl2;

    /* Compressed pitch is bounded by both the CFB size and the fb pitch */
    cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
    if (fb->pitches[0] < cfb_pitch)
        cfb_pitch = fb->pitches[0];

    /* FBC_CTL wants 64B units */
    cfb_pitch = (cfb_pitch / 64) - 1;
    plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

    /* Clear old tags */
    for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
        I915_WRITE(FBC_TAG + (i * 4), 0);

    /* Set it up... */
    fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
    fbc_ctl2 |= plane;
    I915_WRITE(FBC_CONTROL2, fbc_ctl2);
    I915_WRITE(FBC_FENCE_OFF, crtc->y);

    /* enable it... */
    fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
    if (IS_I945GM(dev))
        fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
    fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
    fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
    fbc_ctl |= obj->fence_reg;
    I915_WRITE(FBC_CONTROL, fbc_ctl);

    DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
              cfb_pitch, crtc->y, intel_crtc->plane);
}
1581
 
1582
/* Return true if 8xx-style FBC is currently enabled in hardware. */
static bool i8xx_fbc_enabled(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
1588
 
1589
/* Enable G4x-style (DPFC) framebuffer compression for @crtc. */
static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct drm_framebuffer *fb = crtc->fb;
    struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
    struct drm_i915_gem_object *obj = intel_fb->obj;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
    unsigned long stall_watermark = 200;
    u32 dpfc_ctl;

    dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
    dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
    I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);

    I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
           (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
           (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
    I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

    /* enable it... */
    I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);

    DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
1615
 
1616
/* Disable G4x-style (DPFC) framebuffer compression if it is enabled. */
static void g4x_disable_fbc(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    u32 dpfc_ctl;

    /* Disable compression */
    dpfc_ctl = I915_READ(DPFC_CONTROL);
    if (dpfc_ctl & DPFC_CTL_EN) {
        dpfc_ctl &= ~DPFC_CTL_EN;
        I915_WRITE(DPFC_CONTROL, dpfc_ctl);

        DRM_DEBUG_KMS("disabled FBC\n");
    }
}
1630
 
1631
/* Return true if G4x-style FBC is currently enabled in hardware. */
static bool g4x_fbc_enabled(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
1637
 
1638
/*
 * Poke the SNB blitter so it notifies FBC of writes.  The FBC-notify bit
 * is written through the ECOSKPD lock field, hence the shifted writes.
 */
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */
	gen6_gt_force_wake_get(dev_priv);
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
	gen6_gt_force_wake_put(dev_priv);
}
1657
 
1658
/* Enable ILK/SNB-style (DPFC) framebuffer compression for @crtc. */
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct drm_framebuffer *fb = crtc->fb;
    struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
    struct drm_i915_gem_object *obj = intel_fb->obj;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
    unsigned long stall_watermark = 200;
    u32 dpfc_ctl;

    dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
    dpfc_ctl &= DPFC_RESERVED;
    dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
    /* Set persistent mode for front-buffer rendering, ala X. */
    dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
    dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
    I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);

    I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
           (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
           (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
    I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
    I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
    /* enable it... */
    I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

    if (IS_GEN6(dev)) {
        /* SNB also needs the CPU fence and blitter notification hooked up */
        I915_WRITE(SNB_DPFC_CTL_SA,
               SNB_CPU_FENCE_ENABLE | obj->fence_reg);
        I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
        sandybridge_blit_fbc_update(dev);
    }

    DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
1695
 
1696
/* Disable ILK/SNB-style (DPFC) framebuffer compression if enabled. */
static void ironlake_disable_fbc(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    u32 dpfc_ctl;

    /* Disable compression */
    dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
    if (dpfc_ctl & DPFC_CTL_EN) {
        dpfc_ctl &= ~DPFC_CTL_EN;
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

        DRM_DEBUG_KMS("disabled FBC\n");
    }
}
1710
 
1711
/* Return true if ILK/SNB-style FBC is currently enabled in hardware. */
static bool ironlake_fbc_enabled(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
1717
 
1718
/*
 * Return whether FBC is currently enabled, dispatching through the
 * per-generation hook.  False if this platform has no FBC support.
 */
bool intel_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.fbc_enabled)
		return false;

	return dev_priv->display.fbc_enabled(dev);
}
1727
 
1728
 
1729
 
1730
 
1731
 
1732
 
1733
 
1734
 
1735
 
1736
 
1737
/*
 * Request FBC enablement for @crtc.  In the upstream driver this schedules
 * a delayed work item; in this port the work-queue path is commented out,
 * so currently only the debug message is emitted.
 * NOTE(review): as-is this never actually enables FBC — confirm intended.
 */
static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

//	intel_cancel_fbc_work(dev_priv);

//	work = kzalloc(sizeof *work, GFP_KERNEL);
//	if (work == NULL) {
//		dev_priv->display.enable_fbc(crtc, interval);
//		return;
//	}

//	work->crtc = crtc;
//	work->fb = crtc->fb;
//	work->interval = interval;
//	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

//	dev_priv->fbc_work = work;

	DRM_DEBUG_KMS("scheduling delayed FBC enable\n");

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 */
//	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
1776
 
1777
/*
 * Disable FBC via the per-generation hook and mark no plane as compressed.
 * (Work cancellation from upstream is commented out in this port.)
 */
void intel_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

//   intel_cancel_fbc_work(dev_priv);

	if (!dev_priv->display.disable_fbc)
		return;

	dev_priv->display.disable_fbc(dev);
	dev_priv->cfb_plane = -1;
}
1789
 
1790
/**
1791
 * intel_update_fbc - enable/disable FBC as needed
1792
 * @dev: the drm_device
1793
 *
1794
 * Set up the framebuffer compression hardware at mode set time.  We
1795
 * enable it if possible:
1796
 *   - plane A only (on pre-965)
1797
 *   - no pixel mulitply/line duplication
1798
 *   - no alpha buffer discard
1799
 *   - no dual wide
1800
 *   - framebuffer <= 2048 in width, 1536 in height
1801
 *
1802
 * We can't assume that any compression will take place (worst case),
1803
 * so the compressed buffer has to be the same size as the uncompressed
1804
 * one.  It also must reside (along with the line length buffer) in
1805
 * stolen memory.
1806
 *
1807
 * We need to enable/disable FBC on a global basis.
1808
 */
1809
static void intel_update_fbc(struct drm_device *dev)
1810
{
1811
	struct drm_i915_private *dev_priv = dev->dev_private;
1812
	struct drm_crtc *crtc = NULL, *tmp_crtc;
1813
	struct intel_crtc *intel_crtc;
1814
	struct drm_framebuffer *fb;
1815
	struct intel_framebuffer *intel_fb;
1816
	struct drm_i915_gem_object *obj;
2342 Serge 1817
	int enable_fbc;
2327 Serge 1818
 
1819
	DRM_DEBUG_KMS("\n");
1820
 
1821
	if (!i915_powersave)
1822
		return;
1823
 
1824
	if (!I915_HAS_FBC(dev))
1825
		return;
1826
 
1827
	/*
1828
	 * If FBC is already on, we just have to verify that we can
1829
	 * keep it that way...
1830
	 * Need to disable if:
1831
	 *   - more than one pipe is active
1832
	 *   - changing FBC params (stride, fence, mode)
1833
	 *   - new fb is too large to fit in compressed buffer
1834
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
1835
	 */
1836
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
1837
		if (tmp_crtc->enabled && tmp_crtc->fb) {
1838
			if (crtc) {
1839
				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
2336 Serge 1840
                dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
2327 Serge 1841
				goto out_disable;
1842
			}
1843
			crtc = tmp_crtc;
1844
		}
1845
	}
1846
 
1847
	if (!crtc || crtc->fb == NULL) {
1848
		DRM_DEBUG_KMS("no output, disabling\n");
2336 Serge 1849
        dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
2327 Serge 1850
		goto out_disable;
1851
	}
1852
 
1853
	intel_crtc = to_intel_crtc(crtc);
1854
	fb = crtc->fb;
1855
	intel_fb = to_intel_framebuffer(fb);
1856
	obj = intel_fb->obj;
1857
 
2342 Serge 1858
	enable_fbc = i915_enable_fbc;
1859
	if (enable_fbc < 0) {
1860
		DRM_DEBUG_KMS("fbc set to per-chip default\n");
1861
		enable_fbc = 1;
1862
		if (INTEL_INFO(dev)->gen <= 5)
1863
			enable_fbc = 0;
1864
	}
1865
	if (!enable_fbc) {
1866
		DRM_DEBUG_KMS("fbc disabled per module param\n");
2336 Serge 1867
        dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
2327 Serge 1868
		goto out_disable;
1869
	}
1870
	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
1871
		DRM_DEBUG_KMS("framebuffer too large, disabling "
1872
			      "compression\n");
2336 Serge 1873
        dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
2327 Serge 1874
		goto out_disable;
1875
	}
1876
	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
1877
	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
1878
		DRM_DEBUG_KMS("mode incompatible with compression, "
1879
			      "disabling\n");
2336 Serge 1880
        dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
2327 Serge 1881
		goto out_disable;
1882
	}
1883
	if ((crtc->mode.hdisplay > 2048) ||
1884
	    (crtc->mode.vdisplay > 1536)) {
1885
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
2336 Serge 1886
        dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
2327 Serge 1887
		goto out_disable;
1888
	}
1889
	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
1890
		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
2336 Serge 1891
        dev_priv->no_fbc_reason = FBC_BAD_PLANE;
2327 Serge 1892
		goto out_disable;
1893
	}
1894
 
1895
	/* The use of a CPU fence is mandatory in order to detect writes
1896
	 * by the CPU to the scanout and trigger updates to the FBC.
1897
	 */
1898
//	if (obj->tiling_mode != I915_TILING_X ||
1899
//	    obj->fence_reg == I915_FENCE_REG_NONE) {
1900
//		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
1901
//		dev_priv->no_fbc_reason = FBC_NOT_TILED;
1902
//		goto out_disable;
1903
//	}
1904
 
1905
	/* If the kernel debugger is active, always disable compression */
1906
	if (in_dbg_master())
1907
		goto out_disable;
1908
 
1909
	/* If the scanout has not changed, don't modify the FBC settings.
1910
	 * Note that we make the fundamental assumption that the fb->obj
1911
	 * cannot be unpinned (and have its GTT offset and fence revoked)
1912
	 * without first being decoupled from the scanout and FBC disabled.
1913
	 */
1914
	if (dev_priv->cfb_plane == intel_crtc->plane &&
1915
	    dev_priv->cfb_fb == fb->base.id &&
1916
	    dev_priv->cfb_y == crtc->y)
1917
		return;
1918
 
1919
	if (intel_fbc_enabled(dev)) {
1920
		/* We update FBC along two paths, after changing fb/crtc
1921
		 * configuration (modeswitching) and after page-flipping
1922
		 * finishes. For the latter, we know that not only did
1923
		 * we disable the FBC at the start of the page-flip
1924
		 * sequence, but also more than one vblank has passed.
1925
		 *
1926
		 * For the former case of modeswitching, it is possible
1927
		 * to switch between two FBC valid configurations
1928
		 * instantaneously so we do need to disable the FBC
1929
		 * before we can modify its control registers. We also
1930
		 * have to wait for the next vblank for that to take
1931
		 * effect. However, since we delay enabling FBC we can
1932
		 * assume that a vblank has passed since disabling and
1933
		 * that we can safely alter the registers in the deferred
1934
		 * callback.
1935
		 *
1936
		 * In the scenario that we go from a valid to invalid
1937
		 * and then back to valid FBC configuration we have
1938
		 * no strict enforcement that a vblank occurred since
1939
		 * disabling the FBC. However, along all current pipe
1940
		 * disabling paths we do need to wait for a vblank at
1941
		 * some point. And we wait before enabling FBC anyway.
1942
		 */
1943
		DRM_DEBUG_KMS("disabling active FBC for update\n");
1944
		intel_disable_fbc(dev);
1945
	}
1946
 
1947
	intel_enable_fbc(crtc, 500);
1948
	return;
1949
 
1950
out_disable:
1951
	/* Multiple disables should be harmless */
1952
	if (intel_fbc_enabled(dev)) {
1953
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
1954
		intel_disable_fbc(dev);
1955
	}
1956
}
1957
 
2335 Serge 1958
int
1959
intel_pin_and_fence_fb_obj(struct drm_device *dev,
1960
			   struct drm_i915_gem_object *obj,
1961
			   struct intel_ring_buffer *pipelined)
1962
{
1963
	struct drm_i915_private *dev_priv = dev->dev_private;
1964
	u32 alignment;
1965
	int ret;
2327 Serge 1966
 
2335 Serge 1967
	switch (obj->tiling_mode) {
1968
	case I915_TILING_NONE:
1969
		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1970
			alignment = 128 * 1024;
1971
		else if (INTEL_INFO(dev)->gen >= 4)
1972
			alignment = 4 * 1024;
1973
		else
1974
			alignment = 64 * 1024;
1975
		break;
1976
	case I915_TILING_X:
1977
		/* pin() will align the object as required by fence */
1978
		alignment = 0;
1979
		break;
1980
	case I915_TILING_Y:
1981
		/* FIXME: Is this true? */
1982
		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
1983
		return -EINVAL;
1984
	default:
1985
		BUG();
1986
	}
2327 Serge 1987
 
2335 Serge 1988
	dev_priv->mm.interruptible = false;
1989
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
1990
	if (ret)
1991
		goto err_interruptible;
2327 Serge 1992
 
2335 Serge 1993
	/* Install a fence for tiled scan-out. Pre-i965 always needs a
1994
	 * fence, whereas 965+ only requires a fence if using
1995
	 * framebuffer compression.  For simplicity, we always install
1996
	 * a fence as the cost is not that onerous.
1997
	 */
1998
//	if (obj->tiling_mode != I915_TILING_NONE) {
1999
//		ret = i915_gem_object_get_fence(obj, pipelined);
2000
//		if (ret)
2001
//			goto err_unpin;
2002
//	}
2327 Serge 2003
 
2335 Serge 2004
	dev_priv->mm.interruptible = true;
2005
	return 0;
2327 Serge 2006
 
2335 Serge 2007
err_unpin:
2344 Serge 2008
	i915_gem_object_unpin(obj);
2335 Serge 2009
err_interruptible:
2010
	dev_priv->mm.interruptible = true;
2011
	return ret;
2012
}
2327 Serge 2013
 
2014
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2015
                 int x, int y)
2016
{
2017
    struct drm_device *dev = crtc->dev;
2018
    struct drm_i915_private *dev_priv = dev->dev_private;
2019
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2020
    struct intel_framebuffer *intel_fb;
2021
    struct drm_i915_gem_object *obj;
2022
    int plane = intel_crtc->plane;
2023
    unsigned long Start, Offset;
2024
    u32 dspcntr;
2025
    u32 reg;
2026
 
2027
    switch (plane) {
2028
    case 0:
2029
    case 1:
2030
        break;
2031
    default:
2032
        DRM_ERROR("Can't update plane %d in SAREA\n", plane);
2033
        return -EINVAL;
2034
    }
2035
 
2036
    intel_fb = to_intel_framebuffer(fb);
2037
    obj = intel_fb->obj;
2038
 
2039
    reg = DSPCNTR(plane);
2040
    dspcntr = I915_READ(reg);
2041
    /* Mask out pixel format bits in case we change it */
2042
    dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2043
    switch (fb->bits_per_pixel) {
2044
    case 8:
2045
        dspcntr |= DISPPLANE_8BPP;
2046
        break;
2047
    case 16:
2048
        if (fb->depth == 15)
2049
            dspcntr |= DISPPLANE_15_16BPP;
2050
        else
2051
            dspcntr |= DISPPLANE_16BPP;
2052
        break;
2053
    case 24:
2054
    case 32:
2055
        dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
2056
        break;
2057
    default:
2058
        DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
2059
        return -EINVAL;
2060
    }
2061
    if (INTEL_INFO(dev)->gen >= 4) {
2062
        if (obj->tiling_mode != I915_TILING_NONE)
2063
            dspcntr |= DISPPLANE_TILED;
2064
        else
2065
            dspcntr &= ~DISPPLANE_TILED;
2066
    }
2067
 
2068
    I915_WRITE(reg, dspcntr);
2069
 
2070
    Start = obj->gtt_offset;
2342 Serge 2071
	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2327 Serge 2072
 
2073
    DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2342 Serge 2074
		      Start, Offset, x, y, fb->pitches[0]);
2075
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2327 Serge 2076
    if (INTEL_INFO(dev)->gen >= 4) {
2077
        I915_WRITE(DSPSURF(plane), Start);
2078
        I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2079
        I915_WRITE(DSPADDR(plane), Offset);
2080
    } else
2081
        I915_WRITE(DSPADDR(plane), Start + Offset);
2082
    POSTING_READ(reg);
2083
 
2084
    return 0;
2085
}
2086
 
2087
static int ironlake_update_plane(struct drm_crtc *crtc,
2088
                 struct drm_framebuffer *fb, int x, int y)
2089
{
2090
    struct drm_device *dev = crtc->dev;
2091
    struct drm_i915_private *dev_priv = dev->dev_private;
2092
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2093
    struct intel_framebuffer *intel_fb;
2094
    struct drm_i915_gem_object *obj;
2095
    int plane = intel_crtc->plane;
2096
    unsigned long Start, Offset;
2097
    u32 dspcntr;
2098
    u32 reg;
2099
 
2100
    switch (plane) {
2101
    case 0:
2102
    case 1:
2342 Serge 2103
	case 2:
2327 Serge 2104
        break;
2105
    default:
2106
        DRM_ERROR("Can't update plane %d in SAREA\n", plane);
2107
        return -EINVAL;
2108
    }
2109
 
2110
    intel_fb = to_intel_framebuffer(fb);
2111
    obj = intel_fb->obj;
2112
 
2113
    reg = DSPCNTR(plane);
2114
    dspcntr = I915_READ(reg);
2115
    /* Mask out pixel format bits in case we change it */
2116
    dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2117
    switch (fb->bits_per_pixel) {
2118
    case 8:
2119
        dspcntr |= DISPPLANE_8BPP;
2120
        break;
2121
    case 16:
2122
        if (fb->depth != 16)
2123
            return -EINVAL;
2124
 
2125
        dspcntr |= DISPPLANE_16BPP;
2126
        break;
2127
    case 24:
2128
    case 32:
2129
        if (fb->depth == 24)
2130
            dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
2131
        else if (fb->depth == 30)
2132
            dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
2133
        else
2134
            return -EINVAL;
2135
        break;
2136
    default:
2137
        DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
2138
        return -EINVAL;
2139
    }
2140
 
2141
//    if (obj->tiling_mode != I915_TILING_NONE)
2142
//        dspcntr |= DISPPLANE_TILED;
2143
//    else
2144
        dspcntr &= ~DISPPLANE_TILED;
2145
 
2146
    /* must disable */
2147
    dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2148
 
2149
    I915_WRITE(reg, dspcntr);
2150
 
2336 Serge 2151
    Start = obj->gtt_offset;
2342 Serge 2152
	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2327 Serge 2153
 
2154
    DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2342 Serge 2155
		      Start, Offset, x, y, fb->pitches[0]);
2156
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2330 Serge 2157
	I915_WRITE(DSPSURF(plane), Start);
2158
	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2159
	I915_WRITE(DSPADDR(plane), Offset);
2160
	POSTING_READ(reg);
2327 Serge 2161
 
2162
    return 0;
2163
}
2164
 
2165
/* Assume fb object is pinned & idle & fenced and just update base pointers */
2166
static int
2167
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2168
			   int x, int y, enum mode_set_atomic state)
2169
{
2170
	struct drm_device *dev = crtc->dev;
2171
	struct drm_i915_private *dev_priv = dev->dev_private;
2172
	int ret;
2173
 
2336 Serge 2174
    ENTER();
2175
 
2327 Serge 2176
	ret = dev_priv->display.update_plane(crtc, fb, x, y);
2177
	if (ret)
2336 Serge 2178
    {
2179
        LEAVE();
2327 Serge 2180
		return ret;
2336 Serge 2181
    };
2327 Serge 2182
 
2183
	intel_update_fbc(dev);
2184
	intel_increase_pllclock(crtc);
2336 Serge 2185
    LEAVE();
2327 Serge 2186
 
2187
	return 0;
2188
}
2189
 
2190
static int
2191
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2192
		    struct drm_framebuffer *old_fb)
2193
{
2194
	struct drm_device *dev = crtc->dev;
2195
	struct drm_i915_master_private *master_priv;
2196
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2342 Serge 2197
	int ret;
2327 Serge 2198
 
2336 Serge 2199
    ENTER();
2200
 
2327 Serge 2201
	/* no fb bound */
2202
	if (!crtc->fb) {
2203
		DRM_ERROR("No FB bound\n");
2204
		return 0;
2205
	}
2206
 
2207
	switch (intel_crtc->plane) {
2208
	case 0:
2209
	case 1:
2210
		break;
2342 Serge 2211
	case 2:
2212
		if (IS_IVYBRIDGE(dev))
2213
			break;
2214
		/* fall through otherwise */
2327 Serge 2215
	default:
2216
		DRM_ERROR("no plane for crtc\n");
2217
		return -EINVAL;
2218
	}
2219
 
2220
	mutex_lock(&dev->struct_mutex);
2221
 
2336 Serge 2222
    ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
2223
					 LEAVE_ATOMIC_MODE_SET);
2327 Serge 2224
	if (ret) {
2344 Serge 2225
		i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
2327 Serge 2226
		mutex_unlock(&dev->struct_mutex);
2227
		DRM_ERROR("failed to update base address\n");
2336 Serge 2228
        LEAVE();
2327 Serge 2229
		return ret;
2230
	}
2231
 
2336 Serge 2232
	mutex_unlock(&dev->struct_mutex);
2327 Serge 2233
 
2336 Serge 2234
 
2235
    LEAVE();
2236
    return 0;
2237
 
2330 Serge 2238
#if 0
2239
	if (!dev->primary->master)
2336 Serge 2240
    {
2241
        LEAVE();
2330 Serge 2242
		return 0;
2336 Serge 2243
    };
2327 Serge 2244
 
2330 Serge 2245
	master_priv = dev->primary->master->driver_priv;
2246
	if (!master_priv->sarea_priv)
2336 Serge 2247
    {
2248
        LEAVE();
2330 Serge 2249
		return 0;
2336 Serge 2250
    };
2327 Serge 2251
 
2330 Serge 2252
	if (intel_crtc->pipe) {
2253
		master_priv->sarea_priv->pipeB_x = x;
2254
		master_priv->sarea_priv->pipeB_y = y;
2255
	} else {
2256
		master_priv->sarea_priv->pipeA_x = x;
2257
		master_priv->sarea_priv->pipeA_y = y;
2258
	}
2336 Serge 2259
    LEAVE();
2260
 
2261
	return 0;
2330 Serge 2262
#endif
2336 Serge 2263
 
2327 Serge 2264
}
2265
 
2266
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
2267
{
2268
	struct drm_device *dev = crtc->dev;
2269
	struct drm_i915_private *dev_priv = dev->dev_private;
2270
	u32 dpa_ctl;
2271
 
2272
	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
2273
	dpa_ctl = I915_READ(DP_A);
2274
	dpa_ctl &= ~DP_PLL_FREQ_MASK;
2275
 
2276
	if (clock < 200000) {
2277
		u32 temp;
2278
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
2279
		/* workaround for 160Mhz:
2280
		   1) program 0x4600c bits 15:0 = 0x8124
2281
		   2) program 0x46010 bit 0 = 1
2282
		   3) program 0x46034 bit 24 = 1
2283
		   4) program 0x64000 bit 14 = 1
2284
		   */
2285
		temp = I915_READ(0x4600c);
2286
		temp &= 0xffff0000;
2287
		I915_WRITE(0x4600c, temp | 0x8124);
2288
 
2289
		temp = I915_READ(0x46010);
2290
		I915_WRITE(0x46010, temp | 1);
2291
 
2292
		temp = I915_READ(0x46034);
2293
		I915_WRITE(0x46034, temp | (1 << 24));
2294
	} else {
2295
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
2296
	}
2297
	I915_WRITE(DP_A, dpa_ctl);
2298
 
2299
	POSTING_READ(DP_A);
2300
	udelay(500);
2301
}
2302
 
2303
static void intel_fdi_normal_train(struct drm_crtc *crtc)
2304
{
2305
	struct drm_device *dev = crtc->dev;
2306
	struct drm_i915_private *dev_priv = dev->dev_private;
2307
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2308
	int pipe = intel_crtc->pipe;
2309
	u32 reg, temp;
2310
 
2311
	/* enable normal train */
2312
	reg = FDI_TX_CTL(pipe);
2313
	temp = I915_READ(reg);
2314
	if (IS_IVYBRIDGE(dev)) {
2315
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2316
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
2317
	} else {
2318
		temp &= ~FDI_LINK_TRAIN_NONE;
2319
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
2320
	}
2321
	I915_WRITE(reg, temp);
2322
 
2323
	reg = FDI_RX_CTL(pipe);
2324
	temp = I915_READ(reg);
2325
	if (HAS_PCH_CPT(dev)) {
2326
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2327
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2328
	} else {
2329
		temp &= ~FDI_LINK_TRAIN_NONE;
2330
		temp |= FDI_LINK_TRAIN_NONE;
2331
	}
2332
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2333
 
2334
	/* wait one idle pattern time */
2335
	POSTING_READ(reg);
2336
	udelay(1000);
2337
 
2338
	/* IVB wants error correction enabled */
2339
	if (IS_IVYBRIDGE(dev))
2340
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2341
			   FDI_FE_ERRC_ENABLE);
2342
}
2343
 
2344
static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
2345
{
2346
	struct drm_i915_private *dev_priv = dev->dev_private;
2347
	u32 flags = I915_READ(SOUTH_CHICKEN1);
2348
 
2349
	flags |= FDI_PHASE_SYNC_OVR(pipe);
2350
	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
2351
	flags |= FDI_PHASE_SYNC_EN(pipe);
2352
	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
2353
	POSTING_READ(SOUTH_CHICKEN1);
2354
}
2355
 
2356
/* The FDI link training functions for ILK/Ibexpeak. */
2357
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2358
{
2359
    struct drm_device *dev = crtc->dev;
2360
    struct drm_i915_private *dev_priv = dev->dev_private;
2361
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2362
    int pipe = intel_crtc->pipe;
2363
    int plane = intel_crtc->plane;
2364
    u32 reg, temp, tries;
2365
 
2366
    /* FDI needs bits from pipe & plane first */
2367
    assert_pipe_enabled(dev_priv, pipe);
2368
    assert_plane_enabled(dev_priv, plane);
2369
 
2370
    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
2371
       for train result */
2372
    reg = FDI_RX_IMR(pipe);
2373
    temp = I915_READ(reg);
2374
    temp &= ~FDI_RX_SYMBOL_LOCK;
2375
    temp &= ~FDI_RX_BIT_LOCK;
2376
    I915_WRITE(reg, temp);
2377
    I915_READ(reg);
2378
    udelay(150);
2379
 
2380
    /* enable CPU FDI TX and PCH FDI RX */
2381
    reg = FDI_TX_CTL(pipe);
2382
    temp = I915_READ(reg);
2383
    temp &= ~(7 << 19);
2384
    temp |= (intel_crtc->fdi_lanes - 1) << 19;
2385
    temp &= ~FDI_LINK_TRAIN_NONE;
2386
    temp |= FDI_LINK_TRAIN_PATTERN_1;
2387
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
2388
 
2389
    reg = FDI_RX_CTL(pipe);
2390
    temp = I915_READ(reg);
2391
    temp &= ~FDI_LINK_TRAIN_NONE;
2392
    temp |= FDI_LINK_TRAIN_PATTERN_1;
2393
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
2394
 
2395
    POSTING_READ(reg);
2396
    udelay(150);
2397
 
2398
    /* Ironlake workaround, enable clock pointer after FDI enable*/
2399
    if (HAS_PCH_IBX(dev)) {
2400
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2401
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2402
               FDI_RX_PHASE_SYNC_POINTER_EN);
2403
    }
2404
 
2405
    reg = FDI_RX_IIR(pipe);
2406
    for (tries = 0; tries < 5; tries++) {
2407
        temp = I915_READ(reg);
2408
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2409
 
2410
        if ((temp & FDI_RX_BIT_LOCK)) {
2411
            DRM_DEBUG_KMS("FDI train 1 done.\n");
2412
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2413
            break;
2414
        }
2415
    }
2416
    if (tries == 5)
2417
        DRM_ERROR("FDI train 1 fail!\n");
2418
 
2419
    /* Train 2 */
2420
    reg = FDI_TX_CTL(pipe);
2421
    temp = I915_READ(reg);
2422
    temp &= ~FDI_LINK_TRAIN_NONE;
2423
    temp |= FDI_LINK_TRAIN_PATTERN_2;
2424
    I915_WRITE(reg, temp);
2425
 
2426
    reg = FDI_RX_CTL(pipe);
2427
    temp = I915_READ(reg);
2428
    temp &= ~FDI_LINK_TRAIN_NONE;
2429
    temp |= FDI_LINK_TRAIN_PATTERN_2;
2430
    I915_WRITE(reg, temp);
2431
 
2432
    POSTING_READ(reg);
2433
    udelay(150);
2434
 
2435
    reg = FDI_RX_IIR(pipe);
2436
    for (tries = 0; tries < 5; tries++) {
2437
        temp = I915_READ(reg);
2438
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2439
 
2440
        if (temp & FDI_RX_SYMBOL_LOCK) {
2441
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2442
            DRM_DEBUG_KMS("FDI train 2 done.\n");
2443
            break;
2444
        }
2445
    }
2446
    if (tries == 5)
2447
        DRM_ERROR("FDI train 2 fail!\n");
2448
 
2449
    DRM_DEBUG_KMS("FDI train done\n");
2450
 
2451
}
2452
 
2342 Serge 2453
static const int snb_b_fdi_train_param[] = {
2327 Serge 2454
    FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2455
    FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2456
    FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2457
    FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2458
};
2459
 
2460
/* The FDI link training functions for SNB/Cougarpoint. */
2461
static void gen6_fdi_link_train(struct drm_crtc *crtc)
2462
{
2463
    struct drm_device *dev = crtc->dev;
2464
    struct drm_i915_private *dev_priv = dev->dev_private;
2465
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2466
    int pipe = intel_crtc->pipe;
2467
    u32 reg, temp, i;
2468
 
2469
    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
2470
       for train result */
2471
    reg = FDI_RX_IMR(pipe);
2472
    temp = I915_READ(reg);
2473
    temp &= ~FDI_RX_SYMBOL_LOCK;
2474
    temp &= ~FDI_RX_BIT_LOCK;
2475
    I915_WRITE(reg, temp);
2476
 
2477
    POSTING_READ(reg);
2478
    udelay(150);
2479
 
2480
    /* enable CPU FDI TX and PCH FDI RX */
2481
    reg = FDI_TX_CTL(pipe);
2482
    temp = I915_READ(reg);
2483
    temp &= ~(7 << 19);
2484
    temp |= (intel_crtc->fdi_lanes - 1) << 19;
2485
    temp &= ~FDI_LINK_TRAIN_NONE;
2486
    temp |= FDI_LINK_TRAIN_PATTERN_1;
2487
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2488
    /* SNB-B */
2489
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2490
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
2491
 
2492
    reg = FDI_RX_CTL(pipe);
2493
    temp = I915_READ(reg);
2494
    if (HAS_PCH_CPT(dev)) {
2495
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2496
        temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2497
    } else {
2498
        temp &= ~FDI_LINK_TRAIN_NONE;
2499
        temp |= FDI_LINK_TRAIN_PATTERN_1;
2500
    }
2501
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
2502
 
2503
    POSTING_READ(reg);
2504
    udelay(150);
2505
 
2506
    if (HAS_PCH_CPT(dev))
2507
        cpt_phase_pointer_enable(dev, pipe);
2508
 
2342 Serge 2509
	for (i = 0; i < 4; i++) {
2327 Serge 2510
        reg = FDI_TX_CTL(pipe);
2511
        temp = I915_READ(reg);
2512
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2513
        temp |= snb_b_fdi_train_param[i];
2514
        I915_WRITE(reg, temp);
2515
 
2516
        POSTING_READ(reg);
2517
        udelay(500);
2518
 
2519
        reg = FDI_RX_IIR(pipe);
2520
        temp = I915_READ(reg);
2521
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2522
 
2523
        if (temp & FDI_RX_BIT_LOCK) {
2524
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2525
            DRM_DEBUG_KMS("FDI train 1 done.\n");
2526
            break;
2527
        }
2528
    }
2529
    if (i == 4)
2530
        DRM_ERROR("FDI train 1 fail!\n");
2531
 
2532
    /* Train 2 */
2533
    reg = FDI_TX_CTL(pipe);
2534
    temp = I915_READ(reg);
2535
    temp &= ~FDI_LINK_TRAIN_NONE;
2536
    temp |= FDI_LINK_TRAIN_PATTERN_2;
2537
    if (IS_GEN6(dev)) {
2538
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2539
        /* SNB-B */
2540
        temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2541
    }
2542
    I915_WRITE(reg, temp);
2543
 
2544
    reg = FDI_RX_CTL(pipe);
2545
    temp = I915_READ(reg);
2546
    if (HAS_PCH_CPT(dev)) {
2547
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2548
        temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2549
    } else {
2550
        temp &= ~FDI_LINK_TRAIN_NONE;
2551
        temp |= FDI_LINK_TRAIN_PATTERN_2;
2552
    }
2553
    I915_WRITE(reg, temp);
2554
 
2555
    POSTING_READ(reg);
2556
    udelay(150);
2557
 
2342 Serge 2558
	for (i = 0; i < 4; i++) {
2327 Serge 2559
        reg = FDI_TX_CTL(pipe);
2560
        temp = I915_READ(reg);
2561
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2562
        temp |= snb_b_fdi_train_param[i];
2563
        I915_WRITE(reg, temp);
2564
 
2565
        POSTING_READ(reg);
2566
        udelay(500);
2567
 
2568
        reg = FDI_RX_IIR(pipe);
2569
        temp = I915_READ(reg);
2570
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2571
 
2572
        if (temp & FDI_RX_SYMBOL_LOCK) {
2573
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2574
            DRM_DEBUG_KMS("FDI train 2 done.\n");
2575
            break;
2576
        }
2577
    }
2578
    if (i == 4)
2579
        DRM_ERROR("FDI train 2 fail!\n");
2580
 
2581
    DRM_DEBUG_KMS("FDI train done.\n");
2582
}
2583
 
2584
/* Manual link training for Ivy Bridge A0 parts */
2585
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2586
{
2587
    struct drm_device *dev = crtc->dev;
2588
    struct drm_i915_private *dev_priv = dev->dev_private;
2589
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2590
    int pipe = intel_crtc->pipe;
2591
    u32 reg, temp, i;
2592
 
2593
    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
2594
       for train result */
2595
    reg = FDI_RX_IMR(pipe);
2596
    temp = I915_READ(reg);
2597
    temp &= ~FDI_RX_SYMBOL_LOCK;
2598
    temp &= ~FDI_RX_BIT_LOCK;
2599
    I915_WRITE(reg, temp);
2600
 
2601
    POSTING_READ(reg);
2602
    udelay(150);
2603
 
2604
    /* enable CPU FDI TX and PCH FDI RX */
2605
    reg = FDI_TX_CTL(pipe);
2606
    temp = I915_READ(reg);
2607
    temp &= ~(7 << 19);
2608
    temp |= (intel_crtc->fdi_lanes - 1) << 19;
2609
    temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
2610
    temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
2611
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2612
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2342 Serge 2613
	temp |= FDI_COMPOSITE_SYNC;
2327 Serge 2614
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
2615
 
2616
    reg = FDI_RX_CTL(pipe);
2617
    temp = I915_READ(reg);
2618
    temp &= ~FDI_LINK_TRAIN_AUTO;
2619
    temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2620
    temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2342 Serge 2621
	temp |= FDI_COMPOSITE_SYNC;
2327 Serge 2622
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
2623
 
2624
    POSTING_READ(reg);
2625
    udelay(150);
2626
 
2627
    if (HAS_PCH_CPT(dev))
2628
        cpt_phase_pointer_enable(dev, pipe);
2629
 
2342 Serge 2630
	for (i = 0; i < 4; i++) {
2327 Serge 2631
        reg = FDI_TX_CTL(pipe);
2632
        temp = I915_READ(reg);
2633
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2634
        temp |= snb_b_fdi_train_param[i];
2635
        I915_WRITE(reg, temp);
2636
 
2637
        POSTING_READ(reg);
2638
        udelay(500);
2639
 
2640
        reg = FDI_RX_IIR(pipe);
2641
        temp = I915_READ(reg);
2642
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2643
 
2644
        if (temp & FDI_RX_BIT_LOCK ||
2645
            (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2646
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2647
            DRM_DEBUG_KMS("FDI train 1 done.\n");
2648
            break;
2649
        }
2650
    }
2651
    if (i == 4)
2652
        DRM_ERROR("FDI train 1 fail!\n");
2653
 
2654
    /* Train 2 */
2655
    reg = FDI_TX_CTL(pipe);
2656
    temp = I915_READ(reg);
2657
    temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2658
    temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
2659
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2660
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2661
    I915_WRITE(reg, temp);
2662
 
2663
    reg = FDI_RX_CTL(pipe);
2664
    temp = I915_READ(reg);
2665
    temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2666
    temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2667
    I915_WRITE(reg, temp);
2668
 
2669
    POSTING_READ(reg);
2670
    udelay(150);
2671
 
2342 Serge 2672
	for (i = 0; i < 4; i++) {
2327 Serge 2673
        reg = FDI_TX_CTL(pipe);
2674
        temp = I915_READ(reg);
2675
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2676
        temp |= snb_b_fdi_train_param[i];
2677
        I915_WRITE(reg, temp);
2678
 
2679
        POSTING_READ(reg);
2680
        udelay(500);
2681
 
2682
        reg = FDI_RX_IIR(pipe);
2683
        temp = I915_READ(reg);
2684
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2685
 
2686
        if (temp & FDI_RX_SYMBOL_LOCK) {
2687
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2688
            DRM_DEBUG_KMS("FDI train 2 done.\n");
2689
            break;
2690
        }
2691
    }
2692
    if (i == 4)
2693
        DRM_ERROR("FDI train 2 fail!\n");
2694
 
2695
    DRM_DEBUG_KMS("FDI train done.\n");
2696
}
2697
 
2698
static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
2699
{
2700
	struct drm_device *dev = crtc->dev;
2701
	struct drm_i915_private *dev_priv = dev->dev_private;
2702
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2703
	int pipe = intel_crtc->pipe;
2704
	u32 reg, temp;
2705
 
2706
	/* Write the TU size bits so error detection works */
2707
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
2708
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
2709
 
2710
	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
2711
	reg = FDI_RX_CTL(pipe);
2712
	temp = I915_READ(reg);
2713
	temp &= ~((0x7 << 19) | (0x7 << 16));
2714
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
2715
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2716
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
2717
 
2718
	POSTING_READ(reg);
2719
	udelay(200);
2720
 
2721
	/* Switch from Rawclk to PCDclk */
2722
	temp = I915_READ(reg);
2723
	I915_WRITE(reg, temp | FDI_PCDCLK);
2724
 
2725
	POSTING_READ(reg);
2726
	udelay(200);
2727
 
2728
	/* Enable CPU FDI TX PLL, always on for Ironlake */
2729
	reg = FDI_TX_CTL(pipe);
2730
	temp = I915_READ(reg);
2731
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
2732
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
2733
 
2734
		POSTING_READ(reg);
2735
		udelay(100);
2736
	}
2737
}
2738
 
2739
static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
2740
{
2741
	struct drm_i915_private *dev_priv = dev->dev_private;
2742
	u32 flags = I915_READ(SOUTH_CHICKEN1);
2743
 
2744
	flags &= ~(FDI_PHASE_SYNC_EN(pipe));
2745
	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
2746
	flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
2747
	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
2748
	POSTING_READ(SOUTH_CHICKEN1);
2749
}
2750
static void ironlake_fdi_disable(struct drm_crtc *crtc)
2751
{
2752
	struct drm_device *dev = crtc->dev;
2753
	struct drm_i915_private *dev_priv = dev->dev_private;
2754
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2755
	int pipe = intel_crtc->pipe;
2756
	u32 reg, temp;
2757
 
2758
	/* disable CPU FDI tx and PCH FDI rx */
2759
	reg = FDI_TX_CTL(pipe);
2760
	temp = I915_READ(reg);
2761
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2762
	POSTING_READ(reg);
2763
 
2764
	reg = FDI_RX_CTL(pipe);
2765
	temp = I915_READ(reg);
2766
	temp &= ~(0x7 << 16);
2767
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2768
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2769
 
2770
	POSTING_READ(reg);
2771
	udelay(100);
2772
 
2773
	/* Ironlake workaround, disable clock pointer after downing FDI */
2774
	if (HAS_PCH_IBX(dev)) {
2775
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2776
		I915_WRITE(FDI_RX_CHICKEN(pipe),
2777
			   I915_READ(FDI_RX_CHICKEN(pipe) &
2778
				     ~FDI_RX_PHASE_SYNC_POINTER_EN));
2779
	} else if (HAS_PCH_CPT(dev)) {
2780
		cpt_phase_pointer_disable(dev, pipe);
2781
	}
2782
 
2783
	/* still set train pattern 1 */
2784
	reg = FDI_TX_CTL(pipe);
2785
	temp = I915_READ(reg);
2786
	temp &= ~FDI_LINK_TRAIN_NONE;
2787
	temp |= FDI_LINK_TRAIN_PATTERN_1;
2788
	I915_WRITE(reg, temp);
2789
 
2790
	reg = FDI_RX_CTL(pipe);
2791
	temp = I915_READ(reg);
2792
	if (HAS_PCH_CPT(dev)) {
2793
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2794
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2795
	} else {
2796
		temp &= ~FDI_LINK_TRAIN_NONE;
2797
		temp |= FDI_LINK_TRAIN_PATTERN_1;
2798
	}
2799
	/* BPC in FDI rx is consistent with that in PIPECONF */
2800
	temp &= ~(0x07 << 16);
2801
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2802
	I915_WRITE(reg, temp);
2803
 
2804
	POSTING_READ(reg);
2805
	udelay(100);
2806
}
2807
 
2808
/*
2809
 * When we disable a pipe, we need to clear any pending scanline wait events
2810
 * to avoid hanging the ring, which we assume we are waiting on.
2811
 */
2812
static void intel_clear_scanline_wait(struct drm_device *dev)
2813
{
2814
	struct drm_i915_private *dev_priv = dev->dev_private;
2815
	struct intel_ring_buffer *ring;
2816
	u32 tmp;
2817
 
2818
	if (IS_GEN2(dev))
2819
		/* Can't break the hang on i8xx */
2820
		return;
2821
 
2822
	ring = LP_RING(dev_priv);
2823
	tmp = I915_READ_CTL(ring);
2824
	if (tmp & RING_WAIT)
2825
		I915_WRITE_CTL(ring, tmp);
2826
}
2827
 
2828
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2829
{
2830
	struct drm_i915_gem_object *obj;
2831
	struct drm_i915_private *dev_priv;
2832
 
2833
	if (crtc->fb == NULL)
2834
		return;
2835
 
2836
	obj = to_intel_framebuffer(crtc->fb)->obj;
2837
	dev_priv = crtc->dev->dev_private;
2838
//	wait_event(dev_priv->pending_flip_queue,
2839
//		   atomic_read(&obj->pending_flip) == 0);
2840
}
2841
 
2842
static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2843
{
2844
	struct drm_device *dev = crtc->dev;
2845
	struct drm_mode_config *mode_config = &dev->mode_config;
2846
	struct intel_encoder *encoder;
2847
 
2848
	/*
2849
	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2850
	 * must be driven by its own crtc; no sharing is possible.
2851
	 */
2852
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
2853
		if (encoder->base.crtc != crtc)
2854
			continue;
2855
 
2856
		switch (encoder->type) {
2857
		case INTEL_OUTPUT_EDP:
2858
			if (!intel_encoder_is_pch_edp(&encoder->base))
2859
				return false;
2860
			continue;
2861
		}
2862
	}
2863
 
2864
	return true;
2865
}
2866
 
2867
/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 *
 * The sequence below is order-sensitive: FDI must be trained before the
 * transcoder timings are copied, and the transcoder is enabled last.
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, transc_sel;

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	intel_enable_pch_pll(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		/* Transcoder C has no dedicated PLL; it borrows A or B. */
		transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL :
			TRANSC_DPLLB_SEL;

		/* Be sure PCH DPLL SEL is set */
		temp = I915_READ(PCH_DPLL_SEL);
		if (pipe == 0) {
			temp &= ~(TRANSA_DPLLB_SEL);
			temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
		} else if (pipe == 1) {
			temp &= ~(TRANSB_DPLLB_SEL);
			temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
		} else if (pipe == 2) {
			temp &= ~(TRANSC_DPLLB_SEL);
			temp |= (TRANSC_DPLL_ENABLE | transc_sel);
		}
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	/* The PCH transcoder mirrors the CPU pipe timings exactly. */
	I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
	I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
	I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));

	I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
	I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) &&
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= (TRANS_DP_OUTPUT_ENABLE |
			 TRANS_DP_ENH_FRAMING);
		temp |= bpc << 9; /* same format but at 11:9 */

		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* Route the transcoder to whichever DP port drives it. */
		switch (intel_trans_dp_port_sel(crtc)) {
		case PCH_DP_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		}

		I915_WRITE(reg, temp);
	}

	intel_enable_transcoder(dev_priv, pipe);
}
2959
 
2342 Serge 2960
/*
 * Verify a CPT mode set actually took by checking that the pipe's scanline
 * counter (PIPEDSL) is advancing.  If it is stuck, pulse the FDI autotrain
 * stall-disable chicken bit and re-check.
 */
void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		/* Without this, mode sets may fail silently on FDI */
		I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
		udelay(250);
		I915_WRITE(tc2reg, 0);
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
	}
}
2977
 
2327 Serge 2978
/*
 * Power up a crtc on PCH-split (Ironlake+) hardware.  Ordering matters:
 * FDI/panel fitter/LUT are programmed before the pipe and plane are
 * enabled, and PCH resources are brought up last.
 */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;
    u32 temp;
    bool is_pch_port;

    /* Already running - nothing to do. */
    if (intel_crtc->active)
        return;

    intel_crtc->active = true;
    intel_update_watermarks(dev);

    /* Make sure the LVDS port is powered before touching the pipe. */
    if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
        temp = I915_READ(PCH_LVDS);
        if ((temp & LVDS_PORT_EN) == 0)
            I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
    }

    is_pch_port = intel_crtc_driving_pch(crtc);

    if (is_pch_port)
        ironlake_fdi_pll_enable(crtc);
    else
        ironlake_fdi_disable(crtc);

    /* Enable panel fitting for LVDS */
    if (dev_priv->pch_pf_size &&
        (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
        /* Force use of hard-coded filter coefficients
         * as some pre-programmed values are broken,
         * e.g. x201.
         */
        I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
        I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
        I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
    }

    /*
     * On ILK+ LUT must be loaded before the pipe is running but with
     * clocks enabled
     */
    intel_crtc_load_lut(crtc);

    intel_enable_pipe(dev_priv, pipe, is_pch_port);
    intel_enable_plane(dev_priv, plane, pipe);

    if (is_pch_port)
        ironlake_pch_enable(crtc);

    mutex_lock(&dev->struct_mutex);
    intel_update_fbc(dev);
    mutex_unlock(&dev->struct_mutex);

//    intel_crtc_update_cursor(crtc, true);
}
3037
 
3038
/*
 * Power down a crtc on PCH-split hardware, in the reverse order of
 * ironlake_crtc_enable(): plane/pipe first, then FDI, PCH ports,
 * transcoder, and finally the PLLs and FDI clocks.
 */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;
    u32 reg, temp;

    if (!intel_crtc->active)
        return;

    ENTER();

    intel_crtc_wait_for_pending_flips(crtc);
//    drm_vblank_off(dev, pipe);
//    intel_crtc_update_cursor(crtc, false);

    intel_disable_plane(dev_priv, plane, pipe);

    /* FBC tracks a single plane; drop it if it was ours. */
    if (dev_priv->cfb_plane == plane)
        intel_disable_fbc(dev);

    intel_disable_pipe(dev_priv, pipe);

    /* Disable PF */
    I915_WRITE(PF_CTL(pipe), 0);
    I915_WRITE(PF_WIN_SZ(pipe), 0);

    ironlake_fdi_disable(crtc);

    /* This is a horrible layering violation; we should be doing this in
     * the connector/encoder ->prepare instead, but we don't always have
     * enough information there about the config to know whether it will
     * actually be necessary or just cause undesired flicker.
     */
    intel_disable_pch_ports(dev_priv, pipe);

    intel_disable_transcoder(dev_priv, pipe);

    if (HAS_PCH_CPT(dev)) {
        /* disable TRANS_DP_CTL */
        reg = TRANS_DP_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
        temp |= TRANS_DP_PORT_SEL_NONE;
        I915_WRITE(reg, temp);

        /* disable DPLL_SEL */
        temp = I915_READ(PCH_DPLL_SEL);
        switch (pipe) {
        case 0:
			temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
            break;
        case 1:
            temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
            break;
        case 2:
			/* C shares PLL A or B */
            temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
            break;
        default:
            BUG(); /* wtf */
        }
        I915_WRITE(PCH_DPLL_SEL, temp);
    }

    /* disable PCH DPLL */
	if (!intel_crtc->no_pll)
    	intel_disable_pch_pll(dev_priv, pipe);

    /* Switch from PCDclk to Rawclk */
    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    I915_WRITE(reg, temp & ~FDI_PCDCLK);

    /* Disable CPU FDI TX PLL */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

    POSTING_READ(reg);
    udelay(100);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

    /* Wait for the clocks to turn off. */
    POSTING_READ(reg);
    udelay(100);

    intel_crtc->active = false;
    intel_update_watermarks(dev);

    mutex_lock(&dev->struct_mutex);
    intel_update_fbc(dev);
    intel_clear_scanline_wait(dev);
    mutex_unlock(&dev->struct_mutex);

    LEAVE();

}
3141
 
3142
static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
3143
{
3144
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3145
    int pipe = intel_crtc->pipe;
3146
    int plane = intel_crtc->plane;
3147
 
3148
    /* XXX: When our outputs are all unaware of DPMS modes other than off
3149
     * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3150
     */
3151
    switch (mode) {
3152
    case DRM_MODE_DPMS_ON:
3153
    case DRM_MODE_DPMS_STANDBY:
3154
    case DRM_MODE_DPMS_SUSPEND:
3155
        DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
3156
        ironlake_crtc_enable(crtc);
3157
        break;
3158
 
3159
    case DRM_MODE_DPMS_OFF:
3160
        DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
3161
        ironlake_crtc_disable(crtc);
3162
        break;
3163
    }
3164
}
3165
 
3166
/*
 * Switch the overlay off when its pipe goes down.  The actual switch-off
 * call is stubbed in this port; mm.interruptible is still toggled around
 * it so the teardown section is non-interruptible once re-enabled.
 */
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
	if (!enable && intel_crtc->overlay) {
		struct drm_device *dev = intel_crtc->base.dev;
		struct drm_i915_private *dev_priv = dev->dev_private;

		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.interruptible = false;
//       (void) intel_overlay_switch_off(intel_crtc->overlay);
		dev_priv->mm.interruptible = true;
		mutex_unlock(&dev->struct_mutex);
	}

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}
3183
 
3184
/*
 * Power up a crtc on pre-PCH (gen2-gen4) hardware: PLL, pipe, plane,
 * then LUT/FBC/overlay.
 */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;

    /* Already running - nothing to do. */
    if (intel_crtc->active)
        return;

    intel_crtc->active = true;
    intel_update_watermarks(dev);

    intel_enable_pll(dev_priv, pipe);
    intel_enable_pipe(dev_priv, pipe, false);
    intel_enable_plane(dev_priv, plane, pipe);

    intel_crtc_load_lut(crtc);
    intel_update_fbc(dev);

    /* Give the overlay scaler a chance to enable if it's on this pipe */
    intel_crtc_dpms_overlay(intel_crtc, true);
//    intel_crtc_update_cursor(crtc, true);
}
3209
 
3210
/*
 * Power down a crtc on pre-PCH hardware, mirroring i9xx_crtc_enable():
 * overlay/FBC first, then plane, pipe and PLL.
 */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;

    if (!intel_crtc->active)
        return;

    /* Give the overlay scaler a chance to disable if it's on this pipe */
    intel_crtc_wait_for_pending_flips(crtc);
//    drm_vblank_off(dev, pipe);
    intel_crtc_dpms_overlay(intel_crtc, false);
//    intel_crtc_update_cursor(crtc, false);

    /* FBC tracks a single plane; drop it if it was ours. */
    if (dev_priv->cfb_plane == plane)
        intel_disable_fbc(dev);

    intel_disable_plane(dev_priv, plane, pipe);
    intel_disable_pipe(dev_priv, pipe);
    intel_disable_pll(dev_priv, pipe);

    intel_crtc->active = false;
    intel_update_fbc(dev);
    intel_update_watermarks(dev);
    intel_clear_scanline_wait(dev);
}
3239
 
3240
static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
3241
{
3242
    /* XXX: When our outputs are all unaware of DPMS modes other than off
3243
     * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3244
     */
3245
    switch (mode) {
3246
    case DRM_MODE_DPMS_ON:
3247
    case DRM_MODE_DPMS_STANDBY:
3248
    case DRM_MODE_DPMS_SUSPEND:
3249
        i9xx_crtc_enable(crtc);
3250
        break;
3251
    case DRM_MODE_DPMS_OFF:
3252
        i9xx_crtc_disable(crtc);
3253
        break;
3254
    }
3255
}
3256
 
2330 Serge 3257
/**
 * Sets the power management mode of the pipe and plane.
 *
 * Dispatches to the per-generation dpms hook.  The SAREA bookkeeping that
 * upstream does afterwards is compiled out in this port (#if 0).
 */
static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool enabled;

	/* No transition - nothing to do. */
	if (intel_crtc->dpms_mode == mode)
		return;

	intel_crtc->dpms_mode = mode;

	dev_priv->display.dpms(crtc, mode);

#if 0
	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (!master_priv->sarea_priv)
		return;

	enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;

	switch (pipe) {
	case 0:
		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
		master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
		break;
	case 1:
		master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
		master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
		break;
	default:
		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
		break;
	}
#endif

}
2327 Serge 3302
 
2330 Serge 3303
static void intel_crtc_disable(struct drm_crtc *crtc)
3304
{
3305
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
3306
	struct drm_device *dev = crtc->dev;
2327 Serge 3307
 
2330 Serge 3308
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
2327 Serge 3309
 
2330 Serge 3310
	if (crtc->fb) {
3311
		mutex_lock(&dev->struct_mutex);
2344 Serge 3312
		i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
2330 Serge 3313
		mutex_unlock(&dev->struct_mutex);
3314
	}
3315
}
2327 Serge 3316
 
2330 Serge 3317
/* Prepare for a mode set.
 *
 * Note we could be a lot smarter here.  We need to figure out which outputs
 * will be enabled, which disabled (in short, how the config will changes)
 * and perform the minimum necessary steps to accomplish that, e.g. updating
 * watermarks, FBC configuration, making sure PLLs are programmed correctly,
 * panel fitting is in the proper state, etc.
 */
/* Current policy: disable everything; ->commit turns it back on. */
static void i9xx_crtc_prepare(struct drm_crtc *crtc)
{
	i9xx_crtc_disable(crtc);
}
2327 Serge 3329
 
2330 Serge 3330
/* Re-enable the crtc after a mode set (pre-PCH hardware). */
static void i9xx_crtc_commit(struct drm_crtc *crtc)
{
	i9xx_crtc_enable(crtc);
}
2327 Serge 3334
 
2330 Serge 3335
/* Disable the crtc before a mode set (PCH-split hardware). */
static void ironlake_crtc_prepare(struct drm_crtc *crtc)
{
	ironlake_crtc_disable(crtc);
}
2327 Serge 3339
 
2330 Serge 3340
/* Re-enable the crtc after a mode set (PCH-split hardware). */
static void ironlake_crtc_commit(struct drm_crtc *crtc)
{
	ironlake_crtc_enable(crtc);
}
2327 Serge 3344
 
2342 Serge 3345
/* Turn an encoder off ahead of a mode set via its dpms helper. */
void intel_encoder_prepare(struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	/* lvds has its own version of prepare see intel_lvds_prepare */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
2327 Serge 3351
 
2342 Serge 3352
/*
 * Turn an encoder back on after a mode set; on CPT PCHs also verify the
 * modeset actually took (see intel_cpt_verify_modeset).
 */
void intel_encoder_commit(struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	struct drm_device *dev = encoder->dev;
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
	struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);

	/* lvds has its own version of commit see intel_lvds_commit */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);

	if (HAS_PCH_CPT(dev))
		intel_cpt_verify_modeset(dev, intel_crtc->pipe);
}
2327 Serge 3365
 
2330 Serge 3366
/* Tear down the DRM encoder and free the wrapping intel_encoder. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
3373
 
3374
static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3375
				  struct drm_display_mode *mode,
3376
				  struct drm_display_mode *adjusted_mode)
3377
{
3378
	struct drm_device *dev = crtc->dev;
3379
 
3380
	if (HAS_PCH_SPLIT(dev)) {
3381
		/* FDI link clock is fixed at 2.7G */
3382
		if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
3383
			return false;
3384
	}
3385
 
3386
	/* XXX some encoders set the crtcinfo, others don't.
3387
	 * Obviously we need some form of conflict resolution here...
3388
	 */
3389
	if (adjusted_mode->crtc_htotal == 0)
3390
		drm_mode_set_crtcinfo(adjusted_mode, 0);
3391
 
3392
	return true;
3393
}
3394
 
2327 Serge 3395
/* i945: display core clock is a fixed 400000 (kHz by driver convention). */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}
3399
 
3400
/* i915: display core clock is a fixed 333000. */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333000;
}
3404
 
3405
/* Fallback for other i9xx variants: fixed 200000. */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}
3409
 
3410
/* i915GM: read the display clock selection from the GCFGC PCI config word. */
static int i915gm_get_display_clock_speed(struct drm_device *dev)
{
	u16 gcfgc = 0;

	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
		return 133000;
	else {
		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
		case GC_DISPLAY_CLOCK_333_MHZ:
			return 333000;
		default:
		/* fallthrough: unknown encodings are treated as 190/200 */
		case GC_DISPLAY_CLOCK_190_200_MHZ:
			return 190000;
		}
	}
}
3428
 
3429
/* i865: display core clock is a fixed 266000. */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266000;
}
3433
 
3434
static int i855_get_display_clock_speed(struct drm_device *dev)
{
	/* NOTE(review): hpllcc is never read from hardware here, so the
	 * switch always sees 0 and this returns whichever case matches a
	 * zero HPLLCC field - confirm against the real config-space read
	 * upstream performs. */
	u16 hpllcc = 0;
	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133000;
	}

	/* Shouldn't happen */
	return 0;
}
3453
 
3454
/* i830: display core clock is a fixed 133000. */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133000;
}
3458
 
3459
/* FDI link M/N divider values computed by ironlake_compute_m_n(). */
struct fdi_m_n {
    u32        tu;      /* transfer unit size */
    u32        gmch_m;  /* data M (numerator) */
    u32        gmch_n;  /* data N (denominator) */
    u32        link_m;  /* link M (numerator) */
    u32        link_n;  /* link N (denominator) */
};
3466
 
3467
/*
 * Scale a num/den ratio down (halving both terms) until each value fits
 * in the 24-bit M/N register fields.  The ratio is only approximately
 * preserved, which is acceptable for the hardware's purposes.
 */
static void
fdi_reduce_ratio(u32 *num, u32 *den)
{
	u32 n = *num, d = *den;

	while ((n | d) > 0xffffff) {
		n >>= 1;
		d >>= 1;
	}

	*num = n;
	*den = d;
}
3475
 
3476
/*
 * Compute FDI M/N values for the given bpp / lane count / pixel clock /
 * link clock.  Both ratios are run through fdi_reduce_ratio() so numerator
 * and denominator fit in the 24-bit register fields.
 */
static void
ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
		     int link_clock, struct fdi_m_n *m_n)
{
	m_n->tu = 64; /* default size */

	/* BUG_ON(pixel_clock > INT_MAX / 36); */
	m_n->gmch_m = bits_per_pixel * pixel_clock;
	m_n->gmch_n = link_clock * nlanes * 8;
	fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);

	m_n->link_m = pixel_clock;
	m_n->link_n = link_clock;
	fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
3491
 
3492
 
3493
/* Per-platform FIFO/watermark limits used by intel_calculate_wm(). */
struct intel_watermark_params {
    unsigned long fifo_size;       /* total FIFO size available */
    unsigned long max_wm;          /* upper clamp for the watermark */
    unsigned long default_wm;      /* fallback when computed level <= 0 */
    unsigned long guard_size;      /* extra entries kept in reserve */
    unsigned long cacheline_size;  /* FIFO fetch granularity */
};
3500
 
3501
/* Pineview has different values for various configs */
3502
static const struct intel_watermark_params pineview_display_wm = {
3503
    PINEVIEW_DISPLAY_FIFO,
3504
    PINEVIEW_MAX_WM,
3505
    PINEVIEW_DFT_WM,
3506
    PINEVIEW_GUARD_WM,
3507
    PINEVIEW_FIFO_LINE_SIZE
3508
};
3509
static const struct intel_watermark_params pineview_display_hplloff_wm = {
3510
    PINEVIEW_DISPLAY_FIFO,
3511
    PINEVIEW_MAX_WM,
3512
    PINEVIEW_DFT_HPLLOFF_WM,
3513
    PINEVIEW_GUARD_WM,
3514
    PINEVIEW_FIFO_LINE_SIZE
3515
};
3516
static const struct intel_watermark_params pineview_cursor_wm = {
3517
    PINEVIEW_CURSOR_FIFO,
3518
    PINEVIEW_CURSOR_MAX_WM,
3519
    PINEVIEW_CURSOR_DFT_WM,
3520
    PINEVIEW_CURSOR_GUARD_WM,
3521
    PINEVIEW_FIFO_LINE_SIZE,
3522
};
3523
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
3524
    PINEVIEW_CURSOR_FIFO,
3525
    PINEVIEW_CURSOR_MAX_WM,
3526
    PINEVIEW_CURSOR_DFT_WM,
3527
    PINEVIEW_CURSOR_GUARD_WM,
3528
    PINEVIEW_FIFO_LINE_SIZE
3529
};
3530
static const struct intel_watermark_params g4x_wm_info = {
3531
    G4X_FIFO_SIZE,
3532
    G4X_MAX_WM,
3533
    G4X_MAX_WM,
3534
    2,
3535
    G4X_FIFO_LINE_SIZE,
3536
};
3537
static const struct intel_watermark_params g4x_cursor_wm_info = {
3538
    I965_CURSOR_FIFO,
3539
    I965_CURSOR_MAX_WM,
3540
    I965_CURSOR_DFT_WM,
3541
    2,
3542
    G4X_FIFO_LINE_SIZE,
3543
};
3544
static const struct intel_watermark_params i965_cursor_wm_info = {
3545
    I965_CURSOR_FIFO,
3546
    I965_CURSOR_MAX_WM,
3547
    I965_CURSOR_DFT_WM,
3548
    2,
3549
    I915_FIFO_LINE_SIZE,
3550
};
3551
static const struct intel_watermark_params i945_wm_info = {
3552
    I945_FIFO_SIZE,
3553
    I915_MAX_WM,
3554
    1,
3555
    2,
3556
    I915_FIFO_LINE_SIZE
3557
};
3558
static const struct intel_watermark_params i915_wm_info = {
3559
    I915_FIFO_SIZE,
3560
    I915_MAX_WM,
3561
    1,
3562
    2,
3563
    I915_FIFO_LINE_SIZE
3564
};
3565
static const struct intel_watermark_params i855_wm_info = {
3566
    I855GM_FIFO_SIZE,
3567
    I915_MAX_WM,
3568
    1,
3569
    2,
3570
    I830_FIFO_LINE_SIZE
3571
};
3572
static const struct intel_watermark_params i830_wm_info = {
3573
    I830_FIFO_SIZE,
3574
    I915_MAX_WM,
3575
    1,
3576
    2,
3577
    I830_FIFO_LINE_SIZE
3578
};
3579
 
3580
static const struct intel_watermark_params ironlake_display_wm_info = {
3581
    ILK_DISPLAY_FIFO,
3582
    ILK_DISPLAY_MAXWM,
3583
    ILK_DISPLAY_DFTWM,
3584
    2,
3585
    ILK_FIFO_LINE_SIZE
3586
};
3587
static const struct intel_watermark_params ironlake_cursor_wm_info = {
3588
    ILK_CURSOR_FIFO,
3589
    ILK_CURSOR_MAXWM,
3590
    ILK_CURSOR_DFTWM,
3591
    2,
3592
    ILK_FIFO_LINE_SIZE
3593
};
3594
static const struct intel_watermark_params ironlake_display_srwm_info = {
3595
    ILK_DISPLAY_SR_FIFO,
3596
    ILK_DISPLAY_MAX_SRWM,
3597
    ILK_DISPLAY_DFT_SRWM,
3598
    2,
3599
    ILK_FIFO_LINE_SIZE
3600
};
3601
static const struct intel_watermark_params ironlake_cursor_srwm_info = {
3602
    ILK_CURSOR_SR_FIFO,
3603
    ILK_CURSOR_MAX_SRWM,
3604
    ILK_CURSOR_DFT_SRWM,
3605
    2,
3606
    ILK_FIFO_LINE_SIZE
3607
};
3608
 
3609
static const struct intel_watermark_params sandybridge_display_wm_info = {
3610
    SNB_DISPLAY_FIFO,
3611
    SNB_DISPLAY_MAXWM,
3612
    SNB_DISPLAY_DFTWM,
3613
    2,
3614
    SNB_FIFO_LINE_SIZE
3615
};
3616
static const struct intel_watermark_params sandybridge_cursor_wm_info = {
3617
    SNB_CURSOR_FIFO,
3618
    SNB_CURSOR_MAXWM,
3619
    SNB_CURSOR_DFTWM,
3620
    2,
3621
    SNB_FIFO_LINE_SIZE
3622
};
3623
static const struct intel_watermark_params sandybridge_display_srwm_info = {
3624
    SNB_DISPLAY_SR_FIFO,
3625
    SNB_DISPLAY_MAX_SRWM,
3626
    SNB_DISPLAY_DFT_SRWM,
3627
    2,
3628
    SNB_FIFO_LINE_SIZE
3629
};
3630
static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
3631
    SNB_CURSOR_SR_FIFO,
3632
    SNB_CURSOR_MAX_SRWM,
3633
    SNB_CURSOR_DFT_SRWM,
3634
    2,
3635
    SNB_FIFO_LINE_SIZE
3636
};
3637
 
3638
 
3639
/**
3640
 * intel_calculate_wm - calculate watermark level
3641
 * @clock_in_khz: pixel clock
3642
 * @wm: chip FIFO params
3643
 * @pixel_size: display pixel size
3644
 * @latency_ns: memory latency for the platform
3645
 *
3646
 * Calculate the watermark level (the level at which the display plane will
3647
 * start fetching from memory again).  Each chip has a different display
3648
 * FIFO size and allocation, so the caller needs to figure that out and pass
3649
 * in the correct intel_watermark_params structure.
3650
 *
3651
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
3652
 * on the pixel size.  When it reaches the watermark level, it'll start
3653
 * fetching FIFO line sized based chunks from memory until the FIFO fills
3654
 * past the watermark point.  If the FIFO drains completely, a FIFO underrun
3655
 * will occur, and a display engine hang could result.
3656
 */
3657
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
3658
                    const struct intel_watermark_params *wm,
3659
                    int fifo_size,
3660
                    int pixel_size,
3661
                    unsigned long latency_ns)
3662
{
3663
    long entries_required, wm_size;
3664
 
3665
    /*
3666
     * Note: we need to make sure we don't overflow for various clock &
3667
     * latency values.
3668
     * clocks go from a few thousand to several hundred thousand.
3669
     * latency is usually a few thousand
3670
     */
3671
    entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
3672
        1000;
3673
    entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
3674
 
3675
    DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
3676
 
3677
    wm_size = fifo_size - (entries_required + wm->guard_size);
3678
 
3679
    DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
3680
 
3681
    /* Don't promote wm_size to unsigned... */
3682
    if (wm_size > (long)wm->max_wm)
3683
        wm_size = wm->max_wm;
3684
    if (wm_size <= 0)
3685
        wm_size = wm->default_wm;
3686
    return wm_size;
3687
}
3688
 
3689
/* One row of the self-refresh (CxSR) latency lookup table. */
struct cxsr_latency {
    int is_desktop;                     /* desktop (1) vs mobile (0) part */
    int is_ddr3;                        /* DDR3 (1) vs DDR2 (0) memory */
    unsigned long fsb_freq;             /* FSB frequency key */
    unsigned long mem_freq;             /* memory frequency key */
    unsigned long display_sr;           /* display self-refresh latency */
    unsigned long display_hpll_disable; /* display latency, HPLL off */
    unsigned long cursor_sr;            /* cursor self-refresh latency */
    unsigned long cursor_hpll_disable;  /* cursor latency, HPLL off */
};
3699
 
3700
/*
 * CxSR latency lookup table, keyed by desktop/mobile flavour, memory type
 * and FSB/memory frequency; searched by intel_get_cxsr_latency().
 */
static const struct cxsr_latency cxsr_latency_table[] = {
    {1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
    {1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
    {1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
    {1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
    {1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

    {1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
    {1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
    {1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
    {1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
    {1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

    {1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
    {1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
    {1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
    {1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
    {1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

    {0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
    {0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
    {0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
    {0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
    {0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

    {0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
    {0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
    {0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
    {0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
    {0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

    {0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
    {0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
    {0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
    {0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
    {0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
3737
 
3738
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
3739
                             int is_ddr3,
3740
                             int fsb,
3741
                             int mem)
3742
{
3743
    const struct cxsr_latency *latency;
3744
    int i;
3745
 
3746
    if (fsb == 0 || mem == 0)
3747
        return NULL;
3748
 
3749
    for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
3750
        latency = &cxsr_latency_table[i];
3751
        if (is_desktop == latency->is_desktop &&
3752
            is_ddr3 == latency->is_ddr3 &&
3753
            fsb == latency->fsb_freq && mem == latency->mem_freq)
3754
            return latency;
3755
    }
3756
 
3757
    DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
3758
 
3759
    return NULL;
3760
}
3761
 
3762
/* Turn off Pineview self-refresh by clearing the enable bit in DSPFW3. */
static void pineview_disable_cxsr(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    /* deactivate cxsr */
    I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}
3769
 
3770
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000; /* 5us, in nanoseconds */
3785
 
3786
/*
 * Read the FIFO split for i9xx from DSPARB: plane A gets the low field,
 * plane B gets the span between the cursor start and A's end.
 */
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
3801
 
3802
static int i85x_get_fifo_size(struct drm_device *dev, int plane)
3803
{
3804
	struct drm_i915_private *dev_priv = dev->dev_private;
3805
	uint32_t dsparb = I915_READ(DSPARB);
3806
	int size;
3807
 
3808
	size = dsparb & 0x1ff;
3809
	if (plane)
3810
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
3811
	size >>= 1; /* Convert to cachelines */
3812
 
3813
	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3814
		      plane ? "B" : "A", size);
3815
 
3816
	return size;
3817
}
3818
 
3819
static int i845_get_fifo_size(struct drm_device *dev, int plane)
3820
{
3821
	struct drm_i915_private *dev_priv = dev->dev_private;
3822
	uint32_t dsparb = I915_READ(DSPARB);
3823
	int size;
3824
 
3825
	size = dsparb & 0x7f;
3826
	size >>= 2; /* Convert to cachelines */
3827
 
3828
	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3829
		      plane ? "B" : "A",
3830
		      size);
3831
 
3832
	return size;
3833
}
3834
 
3835
static int i830_get_fifo_size(struct drm_device *dev, int plane)
3836
{
3837
	struct drm_i915_private *dev_priv = dev->dev_private;
3838
	uint32_t dsparb = I915_READ(DSPARB);
3839
	int size;
3840
 
3841
	size = dsparb & 0x7f;
3842
	size >>= 1; /* Convert to cachelines */
3843
 
3844
	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3845
		      plane ? "B" : "A", size);
3846
 
3847
	return size;
3848
}
3849
 
3850
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
3851
{
3852
    struct drm_crtc *crtc, *enabled = NULL;
3853
 
3854
    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3855
        if (crtc->enabled && crtc->fb) {
3856
            if (enabled)
3857
                return NULL;
3858
            enabled = crtc;
3859
        }
3860
    }
3861
 
3862
    return enabled;
3863
}
3864
 
3865
/*
 * pineview_update_wm - program Pineview self-refresh (CxSR) watermarks.
 *
 * Looks up the latency table entry for the current FSB/memory config.
 * With exactly one active CRTC, computes and writes the display/cursor
 * self-refresh and HPLL-off watermarks into DSPFW1/DSPFW3 and enables
 * self-refresh; otherwise (or on an unknown config) disables it.
 */
static void pineview_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		int clock = crtc->mode.clock;
		int pixel_size = crtc->fb->bits_per_pixel / 8;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		/* NOTE(review): the display fifo_size is passed here, not the
		 * cursor's own — looks intentional (matches upstream) but
		 * worth confirming against the PRM. */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}
3933
 
3934
/*
 * g4x_compute_wm0 - compute the WM0 (non-self-refresh) plane and cursor
 * watermarks for one pipe.
 *
 * When the plane's CRTC is disabled or has no framebuffer, stores the
 * guard-size defaults and returns false; otherwise computes both
 * watermarks from the current mode and returns true.  Also used by the
 * Ironlake/Sandybridge paths with per-platform latencies.
 */
static bool g4x_compute_wm0(struct drm_device *dev,
                int plane,
                const struct intel_watermark_params *display,
                int display_latency_ns,
                const struct intel_watermark_params *cursor,
                int cursor_latency_ns,
                int *plane_wm,
                int *cursor_wm)
{
    struct drm_crtc *crtc;
    int htotal, hdisplay, clock, pixel_size;
    int line_time_us, line_count;
    int entries, tlb_miss;

    crtc = intel_get_crtc_for_plane(dev, plane);
    if (crtc->fb == NULL || !crtc->enabled) {
        *cursor_wm = cursor->guard_size;
        *plane_wm = display->guard_size;
        return false;
    }

    htotal = crtc->mode.htotal;
    hdisplay = crtc->mode.hdisplay;
    clock = crtc->mode.clock;
    pixel_size = crtc->fb->bits_per_pixel / 8;

    /* Use the small buffer method to calculate plane watermark */
    entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
    /* extra entries if a fetch can miss the TLB (fifo larger than a line) */
    tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
    if (tlb_miss > 0)
        entries += tlb_miss;
    entries = DIV_ROUND_UP(entries, display->cacheline_size);
    *plane_wm = entries + display->guard_size;
    if (*plane_wm > (int)display->max_wm)
        *plane_wm = display->max_wm;

    /* Use the large buffer method to calculate cursor watermark */
    /* NOTE(review): assumes line_time_us > 0, i.e. clock <= htotal*1000
     * for any real mode — TODO confirm no mode can violate this. */
    line_time_us = ((htotal * 1000) / clock);
    line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
    entries = line_count * 64 * pixel_size;
    tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
    if (tlb_miss > 0)
        entries += tlb_miss;
    entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
    *cursor_wm = entries + cursor->guard_size;
    if (*cursor_wm > (int)cursor->max_wm)
        *cursor_wm = (int)cursor->max_wm;

    return true;
}
3984
 
3985
/*
3986
 * Check the wm result.
3987
 *
3988
 * If any calculated watermark values is larger than the maximum value that
3989
 * can be programmed into the associated watermark register, that watermark
3990
 * must be disabled.
3991
 */
3992
static bool g4x_check_srwm(struct drm_device *dev,
3993
			   int display_wm, int cursor_wm,
3994
			   const struct intel_watermark_params *display,
3995
			   const struct intel_watermark_params *cursor)
3996
{
3997
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
3998
		      display_wm, cursor_wm);
3999
 
4000
	if (display_wm > display->max_wm) {
4001
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
4002
			      display_wm, display->max_wm);
4003
		return false;
4004
	}
4005
 
4006
	if (cursor_wm > cursor->max_wm) {
4007
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
4008
			      cursor_wm, cursor->max_wm);
4009
		return false;
4010
	}
4011
 
4012
	if (!(display_wm || cursor_wm)) {
4013
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
4014
		return false;
4015
	}
4016
 
4017
	return true;
4018
}
4019
 
4020
/*
 * g4x_compute_srwm - compute self-refresh watermarks for the display
 * plane and cursor of the given plane's CRTC.
 *
 * Uses the minimum of the small-buffer (latency-based) and large-buffer
 * (line-based) estimates for the display plane.  Returns the result of
 * g4x_check_srwm(); with a zero latency stores zeros and returns false.
 */
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	/* cursor is always 64 pixels wide on these platforms */
	entries = line_count * pixel_size * 64;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
4065
 
4066
/* True when exactly one bit is set in the enabled-plane mask. */
#define single_plane_enabled(mask) is_power_of_2(mask)
4067
 
4068
/*
 * g4x_update_wm - program WM0 and self-refresh watermarks on G4x.
 *
 * Computes WM0 for both planes, then — only when a single plane is
 * enabled — computes the SR watermarks and toggles FW_BLC_SELF
 * accordingly before writing DSPFW1..3.
 */
static void g4x_update_wm(struct drm_device *dev)
{
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;

	if (g4x_compute_wm0(dev, 0,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	plane_sr = cursor_sr = 0;
	/* SR is only usable with exactly one active plane */
	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	else
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
4118
 
4119
/*
 * i965_update_wm - program watermarks for i965-class hardware.
 *
 * Non-SR watermarks are fixed at 8 (hardware limitation noted below);
 * with a single active CRTC a self-refresh watermark is computed and,
 * on Crestline, self-refresh is enabled.
 */
static void i965_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		int clock = crtc->mode.clock;
		int htotal = crtc->mode.htotal;
		int hdisplay = crtc->mode.hdisplay;
		int pixel_size = crtc->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = ((htotal * 1000) / clock);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		/* cursor is 64 pixels wide; same ns/us trick as above */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * 64;
		entries = DIV_ROUND_UP(entries,
					  i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
4183
 
4184
/*
 * i9xx_update_wm - program FIFO watermarks for gen2/gen3 hardware.
 *
 * Selects the per-chip watermark parameter table, computes WM values for
 * both planes from their current modes, optionally derives a
 * self-refresh watermark when exactly one plane is active, then writes
 * FW_BLC/FW_BLC2 and (re)enables memory self-refresh where supported.
 */
static void i9xx_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i855_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (crtc->enabled && crtc->fb) {
		planea_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		enabled = crtc;
	} else
		planea_wm = fifo_size - wm_info->guard_size;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (crtc->enabled && crtc->fb) {
		planeb_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		/* "enabled" tracks the single active CRTC; two active -> NULL */
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	if (IS_I945G(dev) || IS_I945GM(dev))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
	else if (IS_I915GM(dev))
		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		int clock = enabled->mode.clock;
		int htotal = enabled->mode.htotal;
		int hdisplay = enabled->mode.hdisplay;
		int pixel_size = enabled->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = (htotal * 1000) / clock;

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	/* re-enable self-refresh now that the new watermarks are in place */
	if (HAS_FW_BLC(dev)) {
		if (enabled) {
			if (IS_I945G(dev) || IS_I945GM(dev))
				I915_WRITE(FW_BLC_SELF,
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
			else if (IS_I915GM(dev))
				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
			DRM_DEBUG_KMS("memory self refresh enabled\n");
		} else
			DRM_DEBUG_KMS("memory self refresh disabled\n");
	}
}
4294
 
4295
static void i830_update_wm(struct drm_device *dev)
4296
{
4297
	struct drm_i915_private *dev_priv = dev->dev_private;
4298
	struct drm_crtc *crtc;
4299
	uint32_t fwater_lo;
4300
	int planea_wm;
4301
 
4302
	crtc = single_enabled_crtc(dev);
4303
	if (crtc == NULL)
4304
		return;
4305
 
4306
	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
4307
				       dev_priv->display.get_fifo_size(dev, 0),
4308
				       crtc->fb->bits_per_pixel / 8,
4309
				       latency_ns);
4310
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
4311
	fwater_lo |= (3<<8) | planea_wm;
4312
 
4313
	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
4314
 
4315
	I915_WRITE(FW_BLC, fwater_lo);
4316
}
4317
 
4318
/* Ironlake LP0 (WM0) latencies, in nanoseconds (fed to g4x_compute_wm0). */
#define ILK_LP0_PLANE_LATENCY		700
#define ILK_LP0_CURSOR_LATENCY		1300
4320
 
4321
/*
 * Check the wm result.
 *
 * If any calculated watermark values is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool ironlake_check_srwm(struct drm_device *dev, int level,
				int fbc_wm, int display_wm, int cursor_wm,
				const struct intel_watermark_params *display,
				const struct intel_watermark_params *cursor)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);

	if (fbc_wm > SNB_FBC_MAX_SRWM) {
		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
			      fbc_wm, SNB_FBC_MAX_SRWM, level);

		/* fbc has it's own way to disable FBC WM */
		I915_WRITE(DISP_ARB_CTL,
			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
		return false;
	}

	/* NOTE(review): the two checks below compare against the params'
	 * max_wm but print the SNB_* constants — the logged limit may not
	 * match the one actually enforced.  Worth confirming upstream. */
	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
			      display_wm, SNB_DISPLAY_MAX_SRWM, level);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
			      cursor_wm, SNB_CURSOR_MAX_SRWM, level);
		return false;
	}

	if (!(fbc_wm || display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
		return false;
	}

	return true;
}
4367
 
4368
/*
 * Compute watermark values of WM[1-3],
 * i.e. the latency-dependent self-refresh levels for the display plane,
 * FBC and cursor of the given plane's CRTC.  Returns the result of
 * ironlake_check_srwm(); with a zero latency stores zeros and returns
 * false.
 */
static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
                  int latency_ns,
                  const struct intel_watermark_params *display,
                  const struct intel_watermark_params *cursor,
                  int *fbc_wm, int *display_wm, int *cursor_wm)
{
    struct drm_crtc *crtc;
    unsigned long line_time_us;
    int hdisplay, htotal, pixel_size, clock;
    int line_count, line_size;
    int small, large;
    int entries;

    if (!latency_ns) {
        *fbc_wm = *display_wm = *cursor_wm = 0;
        return false;
    }

    crtc = intel_get_crtc_for_plane(dev, plane);
    hdisplay = crtc->mode.hdisplay;
    htotal = crtc->mode.htotal;
    clock = crtc->mode.clock;
    pixel_size = crtc->fb->bits_per_pixel / 8;

    line_time_us = (htotal * 1000) / clock;
    line_count = (latency_ns / line_time_us + 1000) / 1000;
    line_size = hdisplay * pixel_size;

    /* Use the minimum of the small and large buffer method for primary */
    small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
    large = line_count * line_size;

    entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
    *display_wm = entries + display->guard_size;

    /*
     * Spec says:
     * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
     */
    *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;

    /* calculate the self-refresh watermark for display cursor */
    entries = line_count * pixel_size * 64;
    entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
    *cursor_wm = entries + cursor->guard_size;

    return ironlake_check_srwm(dev, level,
                   *fbc_wm, *display_wm, *cursor_wm,
                   display, cursor);
}
4421
 
4422
/*
 * ironlake_update_wm - program Ironlake FIFO watermarks.
 *
 * Computes and writes WM0 for both pipes, clears the LP registers, then
 * — only with a single enabled plane — programs WM1 and WM2 in
 * ascending order.  WM3 is intentionally left unprogrammed (see the
 * trailing comment).
 */
static void ironlake_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEA_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEB_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled))
		return;
	/* convert the bitmask to the plane index */
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   ILK_READ_WM1_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   ILK_READ_WM2_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/*
	 * WM3 is unsupported on ILK, probably because we don't have latency
	 * data for that power state
	 */
}
4504
 
2342 Serge 4505
void sandybridge_update_wm(struct drm_device *dev)
2327 Serge 4506
{
4507
	struct drm_i915_private *dev_priv = dev->dev_private;
4508
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
4509
	int fbc_wm, plane_wm, cursor_wm;
4510
	unsigned int enabled;
4511
 
4512
	enabled = 0;
4513
	if (g4x_compute_wm0(dev, 0,
4514
			    &sandybridge_display_wm_info, latency,
4515
			    &sandybridge_cursor_wm_info, latency,
4516
			    &plane_wm, &cursor_wm)) {
4517
		I915_WRITE(WM0_PIPEA_ILK,
4518
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4519
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
4520
			      " plane %d, " "cursor: %d\n",
4521
			      plane_wm, cursor_wm);
4522
		enabled |= 1;
4523
	}
4524
 
4525
	if (g4x_compute_wm0(dev, 1,
4526
			    &sandybridge_display_wm_info, latency,
4527
			    &sandybridge_cursor_wm_info, latency,
4528
			    &plane_wm, &cursor_wm)) {
4529
		I915_WRITE(WM0_PIPEB_ILK,
4530
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4531
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
4532
			      " plane %d, cursor: %d\n",
4533
			      plane_wm, cursor_wm);
4534
		enabled |= 2;
4535
	}
4536
 
2342 Serge 4537
	/* IVB has 3 pipes */
4538
	if (IS_IVYBRIDGE(dev) &&
4539
	    g4x_compute_wm0(dev, 2,
4540
			    &sandybridge_display_wm_info, latency,
4541
			    &sandybridge_cursor_wm_info, latency,
4542
			    &plane_wm, &cursor_wm)) {
4543
		I915_WRITE(WM0_PIPEC_IVB,
4544
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4545
		DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
4546
			      " plane %d, cursor: %d\n",
4547
			      plane_wm, cursor_wm);
4548
		enabled |= 3;
4549
	}
4550
 
2327 Serge 4551
	/*
4552
	 * Calculate and update the self-refresh watermark only when one
4553
	 * display plane is used.
4554
	 *
4555
	 * SNB support 3 levels of watermark.
4556
	 *
4557
	 * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
4558
	 * and disabled in the descending order
4559
	 *
4560
	 */
4561
	I915_WRITE(WM3_LP_ILK, 0);
4562
	I915_WRITE(WM2_LP_ILK, 0);
4563
	I915_WRITE(WM1_LP_ILK, 0);
4564
 
2342 Serge 4565
	if (!single_plane_enabled(enabled) ||
4566
	    dev_priv->sprite_scaling_enabled)
2327 Serge 4567
		return;
4568
	enabled = ffs(enabled) - 1;
4569
 
4570
	/* WM1 */
4571
	if (!ironlake_compute_srwm(dev, 1, enabled,
4572
				   SNB_READ_WM1_LATENCY() * 500,
4573
				   &sandybridge_display_srwm_info,
4574
				   &sandybridge_cursor_srwm_info,
4575
				   &fbc_wm, &plane_wm, &cursor_wm))
4576
		return;
4577
 
4578
	I915_WRITE(WM1_LP_ILK,
4579
		   WM1_LP_SR_EN |
4580
		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4581
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
4582
		   (plane_wm << WM1_LP_SR_SHIFT) |
4583
		   cursor_wm);
4584
 
4585
	/* WM2 */
4586
	if (!ironlake_compute_srwm(dev, 2, enabled,
4587
				   SNB_READ_WM2_LATENCY() * 500,
4588
				   &sandybridge_display_srwm_info,
4589
				   &sandybridge_cursor_srwm_info,
4590
				   &fbc_wm, &plane_wm, &cursor_wm))
4591
		return;
4592
 
4593
	I915_WRITE(WM2_LP_ILK,
4594
		   WM2_LP_EN |
4595
		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4596
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
4597
		   (plane_wm << WM1_LP_SR_SHIFT) |
4598
		   cursor_wm);
4599
 
4600
	/* WM3 */
4601
	if (!ironlake_compute_srwm(dev, 3, enabled,
4602
				   SNB_READ_WM3_LATENCY() * 500,
4603
				   &sandybridge_display_srwm_info,
4604
				   &sandybridge_cursor_srwm_info,
4605
				   &fbc_wm, &plane_wm, &cursor_wm))
4606
		return;
4607
 
4608
	I915_WRITE(WM3_LP_ILK,
4609
		   WM3_LP_EN |
4610
		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4611
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
4612
		   (plane_wm << WM1_LP_SR_SHIFT) |
4613
		   cursor_wm);
2342 Serge 4614
}
2336 Serge 4615
 
2342 Serge 4616
/*
 * sandybridge_compute_sprite_wm - compute the WM0 sprite watermark for
 * one pipe using the small-buffer method.
 *
 * Stores the guard-size default and returns false when the pipe's CRTC
 * is disabled or has no framebuffer; otherwise stores the computed
 * (max_wm-clamped) watermark and returns true.
 */
static bool
sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
			      uint32_t sprite_width, int pixel_size,
			      const struct intel_watermark_params *display,
			      int display_latency_ns, int *sprite_wm)
{
	struct drm_crtc *crtc;
	int clock;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (crtc->fb == NULL || !crtc->enabled) {
		*sprite_wm = display->guard_size;
		return false;
	}

	clock = crtc->mode.clock;

	/* Use the small buffer method to calculate the sprite watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size -
		sprite_width * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*sprite_wm = entries + display->guard_size;
	if (*sprite_wm > (int)display->max_wm)
		*sprite_wm = display->max_wm;

	return true;
}
4647
 
2342 Serge 4648
/*
 * sandybridge_compute_sprite_srwm - compute a sprite self-refresh
 * watermark as the minimum of the small- and large-buffer estimates.
 *
 * Returns false when latency is zero or the result exceeds the 10-bit
 * register field (0x3ff); the computed value is stored either way.
 *
 * NOTE(review): line_time_us is derived from sprite_width here rather
 * than htotal as in the other *_compute_srwm helpers — matches upstream
 * at this era, but worth confirming against the PRM.
 */
static bool
sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
				uint32_t sprite_width, int pixel_size,
				const struct intel_watermark_params *display,
				int latency_ns, int *sprite_wm)
{
	struct drm_crtc *crtc;
	unsigned long line_time_us;
	int clock;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*sprite_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	clock = crtc->mode.clock;

	line_time_us = (sprite_width * 1000) / clock;
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = sprite_width * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*sprite_wm = entries + display->guard_size;

	return *sprite_wm > 0x3ff ? false : true;
}
4682
 
4683
/*
 * sandybridge_update_sprite_wm - program sprite watermarks for one pipe.
 *
 * Writes the WM0 sprite field into the pipe's WM0 register, then the
 * LP1 sprite watermark; on Ivybridge also programs the LP2 and LP3
 * sprite watermarks.  Bails out (with a debug message) as soon as any
 * computation fails.
 */
static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
					 uint32_t sprite_width, int pixel_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
	int sprite_wm, reg;
	int ret;

	switch (pipe) {
	case 0:
		reg = WM0_PIPEA_ILK;
		break;
	case 1:
		reg = WM0_PIPEB_ILK;
		break;
	case 2:
		reg = WM0_PIPEC_IVB;
		break;
	default:
		return; /* bad pipe */
	}

	ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
					    &sandybridge_display_wm_info,
					    latency, &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
			      pipe);
		return;
	}

	I915_WRITE(reg, I915_READ(reg) | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
	DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);


	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM1_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM1S_LP_ILK, sprite_wm);

	/* Only IVB has two more LP watermarks for sprite */
	if (!IS_IVYBRIDGE(dev))
		return;

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM2_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM2S_LP_IVB, sprite_wm);

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM3_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM3S_LP_IVB, sprite_wm);
}
4758
 
2327 Serge 4759
/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values here).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that.  And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
static void intel_update_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* no-op on platforms without a watermark hook */
	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(dev);
}
4798
 
2342 Serge 4799
void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
4800
				    uint32_t sprite_width, int pixel_size)
4801
{
4802
	struct drm_i915_private *dev_priv = dev->dev_private;
4803
 
4804
	if (dev_priv->display.update_sprite_wm)
4805
		dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
4806
						   pixel_size);
4807
}
4808
 
2327 Serge 4809
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4810
{
2342 Serge 4811
	if (i915_panel_use_ssc >= 0)
4812
		return i915_panel_use_ssc != 0;
4813
	return dev_priv->lvds_use_ssc
2327 Serge 4814
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4815
}
4816
 
4817
/**
 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
 * @crtc: CRTC structure
 * @pipe_bpp: out parameter, receives the chosen pipe depth in bits per pixel
 *            (per-channel bpc * 3)
 * @mode: requested mode
 *
 * A pipe may be connected to one or more outputs.  Based on the depth of the
 * attached framebuffer, choose a good color depth to use on the pipe.
 *
 * If possible, match the pipe depth to the fb depth.  In some cases, this
 * isn't ideal, because the connected output supports a lesser or restricted
 * set of depths.  Resolve that here:
 *    LVDS typically supports only 6bpc, so clamp down in that case
 *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
 *    Displays may support a restricted set as well, check EDID and clamp as
 *      appropriate.
 *    DP may want to dither down to 6bpc to fit larger modes
 *
 * RETURNS:
 * Dithering requirement (i.e. false if display bpc and pipe bpc match,
 * true if they don't match).
 */
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
					 unsigned int *pipe_bpp,
					 struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	/* Start unbounded; each encoder/connector below can only clamp down. */
	unsigned int display_bpc = UINT_MAX, bpc;

	/* Walk the encoders & connectors on this crtc, get min bpc */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		if (encoder->crtc != crtc)
			continue;

		if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
			unsigned int lvds_bpc;

			/* The A3 power state of the LVDS port tells us whether
			 * the panel runs in 24-bit (8bpc) or 18-bit (6bpc) mode. */
			if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
			    LVDS_A3_POWER_UP)
				lvds_bpc = 8;
			else
				lvds_bpc = 6;

			if (lvds_bpc < display_bpc) {
				DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
				display_bpc = lvds_bpc;
			}
			continue;
		}

		if (intel_encoder->type == INTEL_OUTPUT_EDP) {
			/* Use VBT settings if we have an eDP panel */
			/* edp.bpp is a total bits-per-pixel; /3 gives per-channel bpc. */
			unsigned int edp_bpc = dev_priv->edp.bpp / 3;

			if (edp_bpc < display_bpc) {
				DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
				display_bpc = edp_bpc;
			}
			continue;
		}

		/* Not one of the known troublemakers, check the EDID */
		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    head) {
			if (connector->encoder != encoder)
				continue;

			/* Don't use an invalid EDID bpc value */
			if (connector->display_info.bpc &&
			    connector->display_info.bpc < display_bpc) {
				DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
				display_bpc = connector->display_info.bpc;
			}
		}

		/*
		 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
		 * through, clamp it down.  (Note: >12bpc will be caught below.)
		 */
		if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
			if (display_bpc > 8 && display_bpc < 12) {
				DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
				display_bpc = 12;
			} else {
				DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
				display_bpc = 8;
			}
		}
	}

	/* DP link bandwidth may force 6bpc regardless of the sinks above. */
	if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
		DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
		display_bpc = 6;
	}

	/*
	 * We could just drive the pipe at the highest bpc all the time and
	 * enable dithering as needed, but that costs bandwidth.  So choose
	 * the minimum value that expresses the full color range of the fb but
	 * also stays within the max display bpc discovered above.
	 */

	/* Map framebuffer depth to the minimum bpc that preserves its range. */
	switch (crtc->fb->depth) {
	case 8:
		bpc = 8; /* since we go through a colormap */
		break;
	case 15:
	case 16:
		bpc = 6; /* min is 18bpp */
		break;
	case 24:
		bpc = 8;
		break;
	case 30:
		bpc = 10;
		break;
	case 48:
		bpc = 12;
		break;
	default:
		DRM_DEBUG("unsupported depth, assuming 24 bits\n");
		bpc = min((unsigned int)8, display_bpc);
		break;
	}

	display_bpc = min(display_bpc, bpc);

	/* NOTE(review): this message prints the fb-derived bpc as the "pipe
	 * bpc", but the pipe is actually programmed with display_bpc below —
	 * the two differ whenever the sinks clamped harder than the fb. */
	DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
			 bpc, display_bpc);

	*pipe_bpp = display_bpc * 3;

	/* Dither whenever the pipe depth had to drop below the fb depth. */
	return display_bpc != bpc;
}
4955
 
4956
/*
 * i9xx_crtc_mode_set - program a pre-Ironlake (i9xx-class) pipe for a mode
 * @crtc: CRTC to program
 * @mode: the user-requested mode
 * @adjusted_mode: mode after encoder fixup (clock, timings)
 * @x, @y: framebuffer panning offsets
 * @old_fb: previously attached framebuffer
 *
 * Computes PLL divisors for the target dot clock, programs the DPLL, FP,
 * timing, pipe and plane registers, enables the pipe/plane and finally
 * sets the scanout base.  Returns 0 on success or -EINVAL if no PLL
 * settings can satisfy the requested clock.
 *
 * NOTE: register writes here happen in a deliberate order (e.g. LVDS port
 * on before DPLL enable, pixel multiplier written after clocks stabilize);
 * do not reorder.
 */
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
                  struct drm_display_mode *mode,
                  struct drm_display_mode *adjusted_mode,
                  int x, int y,
                  struct drm_framebuffer *old_fb)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;
    int refclk, num_connectors = 0;
    intel_clock_t clock, reduced_clock;
    u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
    bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
    bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
    struct drm_mode_config *mode_config = &dev->mode_config;
    struct intel_encoder *encoder;
    const intel_limit_t *limit;
    int ret;
    u32 temp;
    u32 lvds_sync = 0;

    /* Classify every encoder attached to this CRTC; the output types
     * drive the PLL, reference-clock and LVDS decisions below. */
    list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
        if (encoder->base.crtc != crtc)
            continue;

        switch (encoder->type) {
        case INTEL_OUTPUT_LVDS:
            is_lvds = true;
            break;
        case INTEL_OUTPUT_SDVO:
        case INTEL_OUTPUT_HDMI:
            is_sdvo = true;
            if (encoder->needs_tv_clock)
                is_tv = true;
            break;
        case INTEL_OUTPUT_DVO:
            is_dvo = true;
            break;
        case INTEL_OUTPUT_TVOUT:
            is_tv = true;
            break;
        case INTEL_OUTPUT_ANALOG:
            is_crt = true;
            break;
        case INTEL_OUTPUT_DISPLAYPORT:
            is_dp = true;
            break;
        }

        num_connectors++;
    }

    /* Reference clock: SSC for a lone LVDS panel, 96 MHz on gen3+,
     * 48 MHz on gen2 (all values in kHz). */
    if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
        refclk = dev_priv->lvds_ssc_freq * 1000;
        DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
                  refclk / 1000);
    } else if (!IS_GEN2(dev)) {
        refclk = 96000;
    } else {
        refclk = 48000;
    }

    /*
     * Returns a set of divisors for the desired target clock with the given
     * refclk, or FALSE.  The returned values represent the clock equation:
     * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
     */
    limit = intel_limit(crtc, refclk);
    ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
    if (!ok) {
        DRM_ERROR("Couldn't find PLL settings for mode!\n");
        return -EINVAL;
    }

    /* Ensure that the cursor is valid for the new mode before changing... */
//    intel_crtc_update_cursor(crtc, true);

    /* Try to find a second divisor set for the LVDS downclock; it is only
     * usable if it shares the P divider with the primary clock (FP0/FP1
     * switching cannot change P). */
    if (is_lvds && dev_priv->lvds_downclock_avail) {
        has_reduced_clock = limit->find_pll(limit, crtc,
                            dev_priv->lvds_downclock,
                            refclk,
                            &reduced_clock);
        if (has_reduced_clock && (clock.p != reduced_clock.p)) {
            /*
             * If the different P is found, it means that we can't
             * switch the display clock by using the FP0/FP1.
             * In such case we will disable the LVDS downclock
             * feature.
             */
            DRM_DEBUG_KMS("Different P is found for "
                      "LVDS clock/downclock\n");
            has_reduced_clock = 0;
        }
    }
    /* SDVO TV has fixed PLL values depend on its clock range,
       this mirrors vbios setting. */
    if (is_sdvo && is_tv) {
        if (adjusted_mode->clock >= 100000
            && adjusted_mode->clock < 140500) {
            clock.p1 = 2;
            clock.p2 = 10;
            clock.n = 3;
            clock.m1 = 16;
            clock.m2 = 8;
        } else if (adjusted_mode->clock >= 140500
               && adjusted_mode->clock <= 200000) {
            clock.p1 = 1;
            clock.p2 = 10;
            clock.n = 6;
            clock.m1 = 12;
            clock.m2 = 8;
        }
    }

    /* Pack the N/M1/M2 divisors into the FP register layout; Pineview
     * encodes N as a one-hot bit instead of a plain value. */
    if (IS_PINEVIEW(dev)) {
        fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
        if (has_reduced_clock)
            fp2 = (1 << reduced_clock.n) << 16 |
                reduced_clock.m1 << 8 | reduced_clock.m2;
    } else {
        fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
        if (has_reduced_clock)
            fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
                reduced_clock.m2;
    }

    /* Build up the DPLL control value. */
    dpll = DPLL_VGA_MODE_DIS;

    if (!IS_GEN2(dev)) {
        if (is_lvds)
            dpll |= DPLLB_MODE_LVDS;
        else
            dpll |= DPLLB_MODE_DAC_SERIAL;
        if (is_sdvo) {
            int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
            if (pixel_multiplier > 1) {
                if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                    dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
            }
            dpll |= DPLL_DVO_HIGH_SPEED;
        }
        if (is_dp)
            dpll |= DPLL_DVO_HIGH_SPEED;

        /* compute bitmask from p1 value */
        if (IS_PINEVIEW(dev))
            dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
        else {
            dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
            if (IS_G4X(dev) && has_reduced_clock)
                dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
        }
        switch (clock.p2) {
        case 5:
            dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
            break;
        case 7:
            dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
            break;
        case 10:
            dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
            break;
        case 14:
            dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
            break;
        }
        if (INTEL_INFO(dev)->gen >= 4)
            dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
    } else {
        /* Gen2 encodes P1/P2 differently. */
        if (is_lvds) {
            dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
        } else {
            if (clock.p1 == 2)
                dpll |= PLL_P1_DIVIDE_BY_TWO;
            else
                dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
            if (clock.p2 == 4)
                dpll |= PLL_P2_DIVIDE_BY_4;
        }
    }

    /* Select the PLL reference input. */
    if (is_sdvo && is_tv)
        dpll |= PLL_REF_INPUT_TVCLKINBC;
    else if (is_tv)
        /* XXX: just matching BIOS for now */
        /*  dpll |= PLL_REF_INPUT_TVCLKINBC; */
        dpll |= 3;
    else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
        dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
    else
        dpll |= PLL_REF_INPUT_DREFCLK;

    /* setup pipeconf */
    pipeconf = I915_READ(PIPECONF(pipe));

    /* Set up the display plane register */
    dspcntr = DISPPLANE_GAMMA_ENABLE;

    /* Ironlake's plane is forced to pipe, bit 24 is to
       enable color space conversion */
    if (pipe == 0)
        dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
    else
        dspcntr |= DISPPLANE_SEL_PIPE_B;

    if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
        /* Enable pixel doubling when the dot clock is > 90% of the (display)
         * core speed.
         *
         * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
         * pipe == 0 check?
         */
        if (mode->clock >
            dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
            pipeconf |= PIPECONF_DOUBLE_WIDE;
        else
            pipeconf &= ~PIPECONF_DOUBLE_WIDE;
    }

	/* default to 8bpc */
	pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
	if (is_dp) {
		if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
			/* DP link bandwidth forced us to 6bpc; dither to hide it. */
			pipeconf |= PIPECONF_BPP_6 |
				    PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;
		}
	}

    dpll |= DPLL_VCO_ENABLE;

    DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
    drm_mode_debug_printmodeline(mode);

    /* Program the divisors and the DPLL with the VCO still disabled. */
    I915_WRITE(FP0(pipe), fp);
    I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);

    POSTING_READ(DPLL(pipe));
    udelay(150);

    /* The LVDS pin pair needs to be on before the DPLLs are enabled.
     * This is an exception to the general rule that mode_set doesn't turn
     * things on.
     */
    if (is_lvds) {
        temp = I915_READ(LVDS);
        temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
        if (pipe == 1) {
            temp |= LVDS_PIPEB_SELECT;
        } else {
            temp &= ~LVDS_PIPEB_SELECT;
        }
        /* set the corresponsding LVDS_BORDER bit */
        temp |= dev_priv->lvds_border_bits;
        /* Set the B0-B3 data pairs corresponding to whether we're going to
         * set the DPLLs for dual-channel mode or not.
         */
        if (clock.p2 == 7)
            temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
        else
            temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

        /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
         * appropriately here, but we need to look more thoroughly into how
         * panels behave in the two modes.
         */
        /* set the dithering flag on LVDS as needed */
        if (INTEL_INFO(dev)->gen >= 4) {
            if (dev_priv->lvds_dither)
                temp |= LVDS_ENABLE_DITHER;
            else
                temp &= ~LVDS_ENABLE_DITHER;
        }
        /* Apply the requested sync polarities, logging any change. */
        if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
            lvds_sync |= LVDS_HSYNC_POLARITY;
        if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
            lvds_sync |= LVDS_VSYNC_POLARITY;
        if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
            != lvds_sync) {
            char flags[2] = "-+";
            DRM_INFO("Changing LVDS panel from "
                 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
                 flags[!(temp & LVDS_HSYNC_POLARITY)],
                 flags[!(temp & LVDS_VSYNC_POLARITY)],
                 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
                 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
            temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
            temp |= lvds_sync;
        }
        I915_WRITE(LVDS, temp);
    }

    if (is_dp) {
        intel_dp_set_m_n(crtc, mode, adjusted_mode);
    }

    /* Now enable the VCO. */
    I915_WRITE(DPLL(pipe), dpll);

    /* Wait for the clocks to stabilize. */
    POSTING_READ(DPLL(pipe));
    udelay(150);

    if (INTEL_INFO(dev)->gen >= 4) {
        temp = 0;
        if (is_sdvo) {
            temp = intel_mode_get_pixel_multiplier(adjusted_mode);
            if (temp > 1)
                temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
            else
                temp = 0;
        }
        I915_WRITE(DPLL_MD(pipe), temp);
    } else {
        /* The pixel multiplier can only be updated once the
         * DPLL is enabled and the clocks are stable.
         *
         * So write it again.
         */
        I915_WRITE(DPLL(pipe), dpll);
    }

    /* Program FP1 with either the downclock divisors (enabling CxSR
     * downclocking if available) or a copy of the primary divisors. */
    intel_crtc->lowfreq_avail = false;
    if (is_lvds && has_reduced_clock && i915_powersave) {
        I915_WRITE(FP1(pipe), fp2);
        intel_crtc->lowfreq_avail = true;
        if (HAS_PIPE_CXSR(dev)) {
            DRM_DEBUG_KMS("enabling CxSR downclocking\n");
            pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
        }
    } else {
        I915_WRITE(FP1(pipe), fp);
        if (HAS_PIPE_CXSR(dev)) {
            DRM_DEBUG_KMS("disabling CxSR downclocking\n");
            pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
        }
    }

    if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
        pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
        /* the chip adds 2 halflines automatically */
        adjusted_mode->crtc_vdisplay -= 1;
        adjusted_mode->crtc_vtotal -= 1;
        adjusted_mode->crtc_vblank_start -= 1;
        adjusted_mode->crtc_vblank_end -= 1;
        adjusted_mode->crtc_vsync_end -= 1;
        adjusted_mode->crtc_vsync_start -= 1;
    } else
		pipeconf &= ~PIPECONF_INTERLACE_MASK; /* progressive */

    /* Pipe timings: registers hold value-minus-one, end in high halfword. */
    I915_WRITE(HTOTAL(pipe),
           (adjusted_mode->crtc_hdisplay - 1) |
           ((adjusted_mode->crtc_htotal - 1) << 16));
    I915_WRITE(HBLANK(pipe),
           (adjusted_mode->crtc_hblank_start - 1) |
           ((adjusted_mode->crtc_hblank_end - 1) << 16));
    I915_WRITE(HSYNC(pipe),
           (adjusted_mode->crtc_hsync_start - 1) |
           ((adjusted_mode->crtc_hsync_end - 1) << 16));

    I915_WRITE(VTOTAL(pipe),
           (adjusted_mode->crtc_vdisplay - 1) |
           ((adjusted_mode->crtc_vtotal - 1) << 16));
    I915_WRITE(VBLANK(pipe),
           (adjusted_mode->crtc_vblank_start - 1) |
           ((adjusted_mode->crtc_vblank_end - 1) << 16));
    I915_WRITE(VSYNC(pipe),
           (adjusted_mode->crtc_vsync_start - 1) |
           ((adjusted_mode->crtc_vsync_end - 1) << 16));

    /* pipesrc and dspsize control the size that is scaled from,
     * which should always be the user's requested size.
     */
    I915_WRITE(DSPSIZE(plane),
           ((mode->vdisplay - 1) << 16) |
           (mode->hdisplay - 1));
    I915_WRITE(DSPPOS(plane), 0);
    I915_WRITE(PIPESRC(pipe),
           ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));

    /* Commit pipeconf, enable the pipe, then enable the plane after a
     * vblank has passed. */
    I915_WRITE(PIPECONF(pipe), pipeconf);
    POSTING_READ(PIPECONF(pipe));
    intel_enable_pipe(dev_priv, pipe, false);

    intel_wait_for_vblank(dev, pipe);

    I915_WRITE(DSPCNTR(plane), dspcntr);
    POSTING_READ(DSPCNTR(plane));
    intel_enable_plane(dev_priv, plane, pipe);

    ret = intel_pipe_set_base(crtc, x, y, old_fb);

    intel_update_watermarks(dev);

    return ret;
}
5353
 
2342 Serge 5354
/*
 * Initialize reference clocks when the driver loads
 *
 * Programs PCH_DREF_CONTROL according to the panel outputs present:
 * enables the SSC source (and SSC1 if spread-spectrum is wanted) when a
 * panel exists, routes the CPU eDP source appropriately, or shuts SSC
 * down entirely when no panel is attached.  Each register write is
 * followed by a posting read and a 200us settle delay — the ordering of
 * the writes is part of the hardware sequence and must not change.
 */
void ironlake_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	u32 temp;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_pch_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;

	/* We need to take the global config into account */
	/* Note: walks ALL encoders, not just one CRTC's — this is a
	 * device-global clock setup. */
		list_for_each_entry(encoder, &mode_config->encoder_list,
				    base.head) {
			switch (encoder->type) {
			case INTEL_OUTPUT_LVDS:
			has_panel = true;
				has_lvds = true;
			break;
			case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (intel_encoder_is_pch_edp(&encoder->base))
				has_pch_edp = true;
			else
				has_cpu_edp = true;
				break;
			}
		}

	/* On IBX the external CK505 clock chip (per VBT display_clock_mode —
	 * presumably; confirm against the VBT parsing) gates SSC support;
	 * later PCHs can always do SSC. */
	if (HAS_PCH_IBX(dev)) {
		has_ck505 = dev_priv->display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
		      has_panel, has_lvds, has_pch_edp, has_cpu_edp,
		      has_ck505);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	temp = I915_READ(PCH_DREF_CONTROL);
	/* Always enable nonspread source */
	temp &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		temp |= DREF_NONSPREAD_CK505_ENABLE;
	else
	temp |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
	temp &= ~DREF_SSC_SOURCE_MASK;
	temp |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output  */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			temp |= DREF_SSC1_ENABLE;
		}

		/* Get SSC going before enabling the outputs */
			I915_WRITE(PCH_DREF_CONTROL, temp);
			POSTING_READ(PCH_DREF_CONTROL);
			udelay(200);

		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			}
			else
				temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
		} else {
		/* No panel at all: tear the CPU output and SSC source down,
		 * again as two separate settled writes. */
		DRM_DEBUG_KMS("Disabling SSC entirely\n");

		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		/* Turn off the SSC source */
		temp &= ~DREF_SSC_SOURCE_MASK;
		temp |= DREF_SSC_SOURCE_DISABLE;

		/* Turn off SSC1 */
		temp &= ~ DREF_SSC1_ENABLE;

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	}
}
5469
 
2342 Serge 5470
static int ironlake_get_refclk(struct drm_crtc *crtc)
5471
{
5472
	struct drm_device *dev = crtc->dev;
5473
	struct drm_i915_private *dev_priv = dev->dev_private;
5474
	struct intel_encoder *encoder;
5475
	struct drm_mode_config *mode_config = &dev->mode_config;
5476
	struct intel_encoder *edp_encoder = NULL;
5477
	int num_connectors = 0;
5478
	bool is_lvds = false;
5479
 
5480
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5481
		if (encoder->base.crtc != crtc)
5482
			continue;
5483
 
5484
		switch (encoder->type) {
5485
		case INTEL_OUTPUT_LVDS:
5486
			is_lvds = true;
5487
			break;
5488
		case INTEL_OUTPUT_EDP:
5489
			edp_encoder = encoder;
5490
			break;
5491
		}
5492
		num_connectors++;
5493
	}
5494
 
5495
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5496
		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
5497
			      dev_priv->lvds_ssc_freq);
5498
		return dev_priv->lvds_ssc_freq * 1000;
5499
	}
5500
 
5501
	return 120000;
5502
}
5503
 
2327 Serge 5504
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5505
                  struct drm_display_mode *mode,
5506
                  struct drm_display_mode *adjusted_mode,
5507
                  int x, int y,
5508
                  struct drm_framebuffer *old_fb)
5509
{
5510
    struct drm_device *dev = crtc->dev;
5511
    struct drm_i915_private *dev_priv = dev->dev_private;
5512
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5513
    int pipe = intel_crtc->pipe;
5514
    int plane = intel_crtc->plane;
5515
    int refclk, num_connectors = 0;
5516
    intel_clock_t clock, reduced_clock;
5517
    u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
5518
    bool ok, has_reduced_clock = false, is_sdvo = false;
5519
    bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
5520
    struct intel_encoder *has_edp_encoder = NULL;
5521
    struct drm_mode_config *mode_config = &dev->mode_config;
5522
    struct intel_encoder *encoder;
5523
    const intel_limit_t *limit;
5524
    int ret;
5525
    struct fdi_m_n m_n = {0};
5526
    u32 temp;
5527
    u32 lvds_sync = 0;
5528
    int target_clock, pixel_multiplier, lane, link_bw, factor;
5529
    unsigned int pipe_bpp;
5530
    bool dither;
5531
 
2336 Serge 5532
    ENTER();
5533
 
2327 Serge 5534
    list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5535
        if (encoder->base.crtc != crtc)
5536
            continue;
5537
 
5538
        switch (encoder->type) {
5539
        case INTEL_OUTPUT_LVDS:
5540
            is_lvds = true;
5541
            break;
5542
        case INTEL_OUTPUT_SDVO:
5543
        case INTEL_OUTPUT_HDMI:
5544
            is_sdvo = true;
5545
            if (encoder->needs_tv_clock)
5546
                is_tv = true;
5547
            break;
5548
        case INTEL_OUTPUT_TVOUT:
5549
            is_tv = true;
5550
            break;
5551
        case INTEL_OUTPUT_ANALOG:
5552
            is_crt = true;
5553
            break;
5554
        case INTEL_OUTPUT_DISPLAYPORT:
5555
            is_dp = true;
5556
            break;
5557
        case INTEL_OUTPUT_EDP:
5558
            has_edp_encoder = encoder;
5559
            break;
5560
        }
5561
 
5562
        num_connectors++;
5563
    }
5564
 
2342 Serge 5565
	refclk = ironlake_get_refclk(crtc);
2327 Serge 5566
 
5567
    /*
5568
     * Returns a set of divisors for the desired target clock with the given
5569
     * refclk, or FALSE.  The returned values represent the clock equation:
5570
     * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
5571
     */
5572
    limit = intel_limit(crtc, refclk);
5573
    ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
5574
    if (!ok) {
5575
        DRM_ERROR("Couldn't find PLL settings for mode!\n");
5576
        return -EINVAL;
5577
    }
5578
 
5579
    /* Ensure that the cursor is valid for the new mode before changing... */
5580
//    intel_crtc_update_cursor(crtc, true);
5581
 
5582
    if (is_lvds && dev_priv->lvds_downclock_avail) {
5583
        has_reduced_clock = limit->find_pll(limit, crtc,
5584
                            dev_priv->lvds_downclock,
5585
                            refclk,
5586
                            &reduced_clock);
5587
        if (has_reduced_clock && (clock.p != reduced_clock.p)) {
5588
            /*
5589
             * If the different P is found, it means that we can't
5590
             * switch the display clock by using the FP0/FP1.
5591
             * In such case we will disable the LVDS downclock
5592
             * feature.
5593
             */
5594
            DRM_DEBUG_KMS("Different P is found for "
5595
                      "LVDS clock/downclock\n");
5596
            has_reduced_clock = 0;
5597
        }
5598
    }
5599
    /* SDVO TV has fixed PLL values depend on its clock range,
5600
       this mirrors vbios setting. */
5601
    if (is_sdvo && is_tv) {
5602
        if (adjusted_mode->clock >= 100000
5603
            && adjusted_mode->clock < 140500) {
5604
            clock.p1 = 2;
5605
            clock.p2 = 10;
5606
            clock.n = 3;
5607
            clock.m1 = 16;
5608
            clock.m2 = 8;
5609
        } else if (adjusted_mode->clock >= 140500
5610
               && adjusted_mode->clock <= 200000) {
5611
            clock.p1 = 1;
5612
            clock.p2 = 10;
5613
            clock.n = 6;
5614
            clock.m1 = 12;
5615
            clock.m2 = 8;
5616
        }
5617
    }
5618
 
5619
    /* FDI link */
5620
    pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5621
    lane = 0;
5622
    /* CPU eDP doesn't require FDI link, so just set DP M/N
5623
       according to current link config */
5624
    if (has_edp_encoder &&
5625
        !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5626
        target_clock = mode->clock;
5627
        intel_edp_link_config(has_edp_encoder,
5628
                      &lane, &link_bw);
5629
    } else {
5630
        /* [e]DP over FDI requires target mode clock
5631
           instead of link clock */
5632
        if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
5633
            target_clock = mode->clock;
5634
        else
5635
            target_clock = adjusted_mode->clock;
5636
 
5637
        /* FDI is a binary signal running at ~2.7GHz, encoding
5638
         * each output octet as 10 bits. The actual frequency
5639
         * is stored as a divider into a 100MHz clock, and the
5640
         * mode pixel clock is stored in units of 1KHz.
5641
         * Hence the bw of each lane in terms of the mode signal
5642
         * is:
5643
         */
5644
        link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
5645
    }
5646
 
5647
    /* determine panel color depth */
5648
    temp = I915_READ(PIPECONF(pipe));
5649
    temp &= ~PIPE_BPC_MASK;
2342 Serge 5650
	dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
2327 Serge 5651
    switch (pipe_bpp) {
5652
    case 18:
5653
        temp |= PIPE_6BPC;
5654
        break;
5655
    case 24:
5656
        temp |= PIPE_8BPC;
5657
        break;
5658
    case 30:
5659
        temp |= PIPE_10BPC;
5660
        break;
5661
    case 36:
5662
        temp |= PIPE_12BPC;
5663
        break;
5664
    default:
5665
        WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
5666
            pipe_bpp);
5667
        temp |= PIPE_8BPC;
5668
        pipe_bpp = 24;
5669
        break;
5670
    }
5671
 
5672
    intel_crtc->bpp = pipe_bpp;
5673
    I915_WRITE(PIPECONF(pipe), temp);
5674
 
5675
    if (!lane) {
5676
        /*
5677
         * Account for spread spectrum to avoid
5678
         * oversubscribing the link. Max center spread
5679
         * is 2.5%; use 5% for safety's sake.
5680
         */
5681
        u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
5682
        lane = bps / (link_bw * 8) + 1;
5683
    }
5684
 
5685
    intel_crtc->fdi_lanes = lane;
5686
 
5687
    if (pixel_multiplier > 1)
5688
        link_bw *= pixel_multiplier;
5689
    ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
5690
                 &m_n);
5691
 
5692
    fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
5693
    if (has_reduced_clock)
5694
        fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
5695
            reduced_clock.m2;
5696
 
5697
    /* Enable autotuning of the PLL clock (if permissible) */
5698
    factor = 21;
5699
    if (is_lvds) {
5700
        if ((intel_panel_use_ssc(dev_priv) &&
5701
             dev_priv->lvds_ssc_freq == 100) ||
5702
            (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
5703
            factor = 25;
5704
    } else if (is_sdvo && is_tv)
5705
        factor = 20;
5706
 
5707
    if (clock.m < factor * clock.n)
5708
        fp |= FP_CB_TUNE;
5709
 
5710
    dpll = 0;
5711
 
5712
    if (is_lvds)
5713
        dpll |= DPLLB_MODE_LVDS;
5714
    else
5715
        dpll |= DPLLB_MODE_DAC_SERIAL;
5716
    if (is_sdvo) {
5717
        int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5718
        if (pixel_multiplier > 1) {
5719
            dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
5720
        }
5721
        dpll |= DPLL_DVO_HIGH_SPEED;
5722
    }
5723
    if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
5724
        dpll |= DPLL_DVO_HIGH_SPEED;
5725
 
5726
    /* compute bitmask from p1 value */
5727
    dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5728
    /* also FPA1 */
5729
    dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5730
 
5731
    switch (clock.p2) {
5732
    case 5:
5733
        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5734
        break;
5735
    case 7:
5736
        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5737
        break;
5738
    case 10:
5739
        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5740
        break;
5741
    case 14:
5742
        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5743
        break;
5744
    }
5745
 
5746
    if (is_sdvo && is_tv)
5747
        dpll |= PLL_REF_INPUT_TVCLKINBC;
5748
    else if (is_tv)
5749
        /* XXX: just matching BIOS for now */
5750
        /*  dpll |= PLL_REF_INPUT_TVCLKINBC; */
5751
        dpll |= 3;
5752
    else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5753
        dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5754
    else
5755
        dpll |= PLL_REF_INPUT_DREFCLK;
5756
 
5757
    /* setup pipeconf */
5758
    pipeconf = I915_READ(PIPECONF(pipe));
5759
 
5760
    /* Set up the display plane register */
5761
    dspcntr = DISPPLANE_GAMMA_ENABLE;
5762
 
2342 Serge 5763
	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
2327 Serge 5764
    drm_mode_debug_printmodeline(mode);
5765
 
5766
    /* PCH eDP needs FDI, but CPU eDP does not */
2342 Serge 5767
	if (!intel_crtc->no_pll) {
5768
		if (!has_edp_encoder ||
5769
		    intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
2327 Serge 5770
        I915_WRITE(PCH_FP0(pipe), fp);
5771
        I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
5772
 
5773
        POSTING_READ(PCH_DPLL(pipe));
5774
        udelay(150);
5775
    }
2342 Serge 5776
	} else {
5777
		if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) &&
5778
		    fp == I915_READ(PCH_FP0(0))) {
5779
			intel_crtc->use_pll_a = true;
5780
			DRM_DEBUG_KMS("using pipe a dpll\n");
5781
		} else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) &&
5782
			   fp == I915_READ(PCH_FP0(1))) {
5783
			intel_crtc->use_pll_a = false;
5784
			DRM_DEBUG_KMS("using pipe b dpll\n");
5785
		} else {
5786
			DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
5787
			return -EINVAL;
2327 Serge 5788
        }
5789
    }
5790
 
5791
    /* The LVDS pin pair needs to be on before the DPLLs are enabled.
5792
     * This is an exception to the general rule that mode_set doesn't turn
5793
     * things on.
5794
     */
5795
    if (is_lvds) {
5796
        temp = I915_READ(PCH_LVDS);
5797
        temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
2342 Serge 5798
		if (HAS_PCH_CPT(dev)) {
5799
			temp &= ~PORT_TRANS_SEL_MASK;
5800
			temp |= PORT_TRANS_SEL_CPT(pipe);
5801
		} else {
5802
			if (pipe == 1)
2327 Serge 5803
                temp |= LVDS_PIPEB_SELECT;
5804
            else
5805
                temp &= ~LVDS_PIPEB_SELECT;
5806
        }
2342 Serge 5807
 
2327 Serge 5808
        /* set the corresponsding LVDS_BORDER bit */
5809
        temp |= dev_priv->lvds_border_bits;
5810
        /* Set the B0-B3 data pairs corresponding to whether we're going to
5811
         * set the DPLLs for dual-channel mode or not.
5812
         */
5813
        if (clock.p2 == 7)
5814
            temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
5815
        else
5816
            temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
5817
 
5818
        /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
5819
         * appropriately here, but we need to look more thoroughly into how
5820
         * panels behave in the two modes.
5821
         */
5822
        if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5823
            lvds_sync |= LVDS_HSYNC_POLARITY;
5824
        if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
5825
            lvds_sync |= LVDS_VSYNC_POLARITY;
5826
        if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
5827
            != lvds_sync) {
5828
            char flags[2] = "-+";
5829
            DRM_INFO("Changing LVDS panel from "
5830
                 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
5831
                 flags[!(temp & LVDS_HSYNC_POLARITY)],
5832
                 flags[!(temp & LVDS_VSYNC_POLARITY)],
5833
                 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
5834
                 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
5835
            temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5836
            temp |= lvds_sync;
5837
        }
5838
        I915_WRITE(PCH_LVDS, temp);
5839
    }
5840
 
5841
    pipeconf &= ~PIPECONF_DITHER_EN;
5842
    pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
5843
    if ((is_lvds && dev_priv->lvds_dither) || dither) {
5844
        pipeconf |= PIPECONF_DITHER_EN;
2342 Serge 5845
		pipeconf |= PIPECONF_DITHER_TYPE_SP;
2327 Serge 5846
    }
5847
    if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5848
        intel_dp_set_m_n(crtc, mode, adjusted_mode);
5849
    } else {
5850
        /* For non-DP output, clear any trans DP clock recovery setting.*/
5851
        I915_WRITE(TRANSDATA_M1(pipe), 0);
5852
        I915_WRITE(TRANSDATA_N1(pipe), 0);
5853
        I915_WRITE(TRANSDPLINK_M1(pipe), 0);
5854
        I915_WRITE(TRANSDPLINK_N1(pipe), 0);
5855
    }
5856
 
2342 Serge 5857
	if (!intel_crtc->no_pll &&
5858
	    (!has_edp_encoder ||
5859
	     intel_encoder_is_pch_edp(&has_edp_encoder->base))) {
2327 Serge 5860
        I915_WRITE(PCH_DPLL(pipe), dpll);
5861
 
5862
        /* Wait for the clocks to stabilize. */
5863
        POSTING_READ(PCH_DPLL(pipe));
5864
        udelay(150);
5865
 
5866
        /* The pixel multiplier can only be updated once the
5867
         * DPLL is enabled and the clocks are stable.
5868
         *
5869
         * So write it again.
5870
         */
5871
        I915_WRITE(PCH_DPLL(pipe), dpll);
5872
    }
5873
 
5874
    intel_crtc->lowfreq_avail = false;
2342 Serge 5875
	if (!intel_crtc->no_pll) {
2327 Serge 5876
    if (is_lvds && has_reduced_clock && i915_powersave) {
5877
        I915_WRITE(PCH_FP1(pipe), fp2);
5878
        intel_crtc->lowfreq_avail = true;
5879
        if (HAS_PIPE_CXSR(dev)) {
5880
            DRM_DEBUG_KMS("enabling CxSR downclocking\n");
5881
            pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
5882
        }
5883
    } else {
5884
        I915_WRITE(PCH_FP1(pipe), fp);
5885
        if (HAS_PIPE_CXSR(dev)) {
5886
            DRM_DEBUG_KMS("disabling CxSR downclocking\n");
5887
            pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
5888
        }
5889
    }
2342 Serge 5890
	}
2327 Serge 5891
 
5892
    if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5893
        pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
5894
        /* the chip adds 2 halflines automatically */
5895
        adjusted_mode->crtc_vdisplay -= 1;
5896
        adjusted_mode->crtc_vtotal -= 1;
5897
        adjusted_mode->crtc_vblank_start -= 1;
5898
        adjusted_mode->crtc_vblank_end -= 1;
5899
        adjusted_mode->crtc_vsync_end -= 1;
5900
        adjusted_mode->crtc_vsync_start -= 1;
5901
    } else
5902
        pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
5903
 
5904
    I915_WRITE(HTOTAL(pipe),
5905
           (adjusted_mode->crtc_hdisplay - 1) |
5906
           ((adjusted_mode->crtc_htotal - 1) << 16));
5907
    I915_WRITE(HBLANK(pipe),
5908
           (adjusted_mode->crtc_hblank_start - 1) |
5909
           ((adjusted_mode->crtc_hblank_end - 1) << 16));
5910
    I915_WRITE(HSYNC(pipe),
5911
           (adjusted_mode->crtc_hsync_start - 1) |
5912
           ((adjusted_mode->crtc_hsync_end - 1) << 16));
5913
 
5914
    I915_WRITE(VTOTAL(pipe),
5915
           (adjusted_mode->crtc_vdisplay - 1) |
5916
           ((adjusted_mode->crtc_vtotal - 1) << 16));
5917
    I915_WRITE(VBLANK(pipe),
5918
           (adjusted_mode->crtc_vblank_start - 1) |
5919
           ((adjusted_mode->crtc_vblank_end - 1) << 16));
5920
    I915_WRITE(VSYNC(pipe),
5921
           (adjusted_mode->crtc_vsync_start - 1) |
5922
           ((adjusted_mode->crtc_vsync_end - 1) << 16));
5923
 
5924
    /* pipesrc controls the size that is scaled from, which should
5925
     * always be the user's requested size.
5926
     */
5927
    I915_WRITE(PIPESRC(pipe),
5928
           ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
5929
 
5930
    I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
5931
    I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
5932
    I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
5933
    I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
5934
 
5935
    if (has_edp_encoder &&
5936
        !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5937
        ironlake_set_pll_edp(crtc, adjusted_mode->clock);
5938
    }
5939
 
5940
    I915_WRITE(PIPECONF(pipe), pipeconf);
5941
    POSTING_READ(PIPECONF(pipe));
5942
 
5943
    intel_wait_for_vblank(dev, pipe);
5944
 
5945
    if (IS_GEN5(dev)) {
5946
        /* enable address swizzle for tiling buffer */
5947
        temp = I915_READ(DISP_ARB_CTL);
5948
        I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
5949
    }
5950
 
5951
    I915_WRITE(DSPCNTR(plane), dspcntr);
5952
    POSTING_READ(DSPCNTR(plane));
5953
 
5954
    ret = intel_pipe_set_base(crtc, x, y, old_fb);
5955
 
2336 Serge 5956
    dbgprintf("Set base\n");
5957
 
2327 Serge 5958
    intel_update_watermarks(dev);
5959
 
2336 Serge 5960
    LEAVE();
5961
 
2327 Serge 5962
    return ret;
5963
}
5964
 
2330 Serge 5965
/*
 * Generic CRTC mode-set entry point.
 *
 * Delegates the actual programming to the per-generation hook
 * (dev_priv->display.crtc_mode_set, e.g. the ironlake path above) and
 * then records that the CRTC is logically powered on.
 *
 * Returns the hook's result: 0 on success, negative errno on failure.
 */
static int intel_crtc_mode_set(struct drm_crtc *crtc,
			       struct drm_display_mode *mode,
			       struct drm_display_mode *adjusted_mode,
			       int x, int y,
			       struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	/* Only referenced by the vblank bracketing below, which is disabled
	 * in this port, so 'pipe' is currently unused. */
	int pipe = intel_crtc->pipe;
	int ret;

//	drm_vblank_pre_modeset(dev, pipe);
    ENTER();  /* KolibriOS tracing: function entry */

	ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
					      x, y, old_fb);

//	drm_vblank_post_modeset(dev, pipe);

	/* Mode setting implies the pipe is running afterwards. */
	intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
    LEAVE();  /* KolibriOS tracing: function exit */

	return ret;
}
2327 Serge 5990
 
2342 Serge 5991
/*
 * Check whether the ELD (EDID-Like Data, audio capability block) already
 * programmed into the hardware matches connector->eld, so callers can
 * skip rewriting it.
 *
 * @reg_eldv/@bits_eldv: register and mask holding the "ELD valid" flag
 * @reg_elda/@bits_elda: register and mask of the ELD access address,
 *                       which is reset to 0 before the read-back loop
 * @reg_edid:            data-window register the hardware ELD is read from
 *
 * Returns true when hardware state is already up to date:
 *  - an empty local ELD (eld[0] == 0) is up to date iff the valid bit
 *    is clear;
 *  - otherwise the valid bit must be set and every dword must match.
 */
static bool intel_eld_uptodate(struct drm_connector *connector,
			       int reg_eldv, uint32_t bits_eldv,
			       int reg_elda, uint32_t bits_elda,
			       int reg_edid)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t i;	/* scratch: register value first, loop index later */

	i = I915_READ(reg_eldv);
	i &= bits_eldv;

	/* No local ELD: hardware is up to date only if nothing is valid. */
	if (!eld[0])
		return !i;

	if (!i)
		return false;

	/* Rewind the hardware's ELD access address to 0 for read-back. */
	i = I915_READ(reg_elda);
	i &= ~bits_elda;
	I915_WRITE(reg_elda, i);

	/* eld[2] is used as the compare length in dwords — presumably the
	 * ELD baseline size field; TODO confirm against the ELD spec. */
	for (i = 0; i < eld[2]; i++)
		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
			return false;

	return true;
}
6019
 
6020
/*
 * Program the connector's ELD into the G4X-class audio hardware.
 *
 * Sequence: pick the ELD-valid bit for the detected audio device,
 * bail if the hardware copy already matches, otherwise invalidate,
 * stream the ELD dwords through the data window, and re-validate.
 */
static void g4x_write_eld(struct drm_connector *connector,
			  struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t len;
	uint32_t i;

	i = I915_READ(G4X_AUD_VID_DID);

	/* Select the ELD-valid bit matching the audio device ID. */
	if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
		eldv = G4X_ELDV_DEVCL_DEVBLC;
	else
		eldv = G4X_ELDV_DEVCTG;

	/* Nothing to do if hardware already holds this exact ELD. */
	if (intel_eld_uptodate(connector,
			       G4X_AUD_CNTL_ST, eldv,
			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
			       G4X_HDMIW_HDMIEDID))
		return;

	/* Invalidate the old ELD and reset the write address to 0. */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i &= ~(eldv | G4X_ELD_ADDR);
	len = (i >> 9) & 0x1f;		/* ELD buffer size */
	I915_WRITE(G4X_AUD_CNTL_ST, i);

	/* Empty local ELD: leave the valid bit clear and stop here. */
	if (!eld[0])
		return;

	/* eld[2] appears to be the ELD size in dwords; clamp to the
	 * hardware buffer size read back above. */
	len = min_t(uint8_t, eld[2], len);
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));

	/* Mark the freshly written ELD as valid. */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i |= eldv;
	I915_WRITE(G4X_AUD_CNTL_ST, i);
}
6059
 
6060
/*
 * Program the connector's ELD into the PCH (Ironlake-class) audio
 * hardware.  Register bases differ between IBX and CPT south bridges;
 * per-pipe registers are spaced 0x100 apart.
 */
static void ironlake_write_eld(struct drm_connector *connector,
				     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t i;	/* scratch: pipe, port, then loop index */
	int len;
	int hdmiw_hdmiedid;
	int aud_cntl_st;
	int aud_cntrl_st2;

	/* Select the register block for the PCH generation. */
	if (HAS_PCH_IBX(connector->dev)) {
		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
		aud_cntl_st = IBX_AUD_CNTL_ST_A;
		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
	} else {
		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
		aud_cntl_st = CPT_AUD_CNTL_ST_A;
		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
	}

	/* Per-pipe register instances are 0x100 apart. */
	i = to_intel_crtc(crtc)->pipe;
	hdmiw_hdmiedid += i * 0x100;
	aud_cntl_st += i * 0x100;

	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));

	i = I915_READ(aud_cntl_st);
	i = (i >> 29) & 0x3;		/* DIP_Port_Select, 0x1 = PortB */
	if (!i) {
		DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
		/* operate blindly on all ports */
		eldv = IBX_ELD_VALIDB;
		eldv |= IBX_ELD_VALIDB << 4;
		eldv |= IBX_ELD_VALIDB << 8;
	} else {
		/* One valid bit per port, 4 bits apart, starting at port B. */
		DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
	}

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
	}

	/* Skip the rewrite if hardware already holds this exact ELD. */
	if (intel_eld_uptodate(connector,
			       aud_cntrl_st2, eldv,
			       aud_cntl_st, IBX_ELD_ADDRESS,
			       hdmiw_hdmiedid))
		return;

	/* Invalidate the stale hardware ELD before rewriting it. */
	i = I915_READ(aud_cntrl_st2);
	i &= ~eldv;
	I915_WRITE(aud_cntrl_st2, i);

	/* Empty local ELD: leave the valid bits clear and stop. */
	if (!eld[0])
		return;

	/* Reset the ELD write address to the start of the buffer. */
	i = I915_READ(aud_cntl_st);
	i &= ~IBX_ELD_ADDRESS;
	I915_WRITE(aud_cntl_st, i);

	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));

	/* Mark the freshly written ELD as valid. */
	i = I915_READ(aud_cntrl_st2);
	i |= eldv;
	I915_WRITE(aud_cntrl_st2, i);
}
6132
 
6133
void intel_write_eld(struct drm_encoder *encoder,
6134
		     struct drm_display_mode *mode)
6135
{
6136
	struct drm_crtc *crtc = encoder->crtc;
6137
	struct drm_connector *connector;
6138
	struct drm_device *dev = encoder->dev;
6139
	struct drm_i915_private *dev_priv = dev->dev_private;
6140
 
6141
	connector = drm_select_eld(encoder, mode);
6142
	if (!connector)
6143
		return;
6144
 
6145
	DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6146
			 connector->base.id,
6147
			 drm_get_connector_name(connector),
6148
			 connector->encoder->base.id,
6149
			 drm_get_encoder_name(connector->encoder));
6150
 
6151
	connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
6152
 
6153
	if (dev_priv->display.write_eld)
6154
		dev_priv->display.write_eld(connector, crtc);
6155
}
6156
 
2327 Serge 6157
/** Loads the palette/gamma unit for the CRTC with the prepared values */
6158
void intel_crtc_load_lut(struct drm_crtc *crtc)
6159
{
6160
	struct drm_device *dev = crtc->dev;
6161
	struct drm_i915_private *dev_priv = dev->dev_private;
6162
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6163
	int palreg = PALETTE(intel_crtc->pipe);
6164
	int i;
6165
 
6166
	/* The clocks have to be on to load the palette. */
6167
	if (!crtc->enabled)
6168
		return;
6169
 
6170
	/* use legacy palette for Ironlake */
6171
	if (HAS_PCH_SPLIT(dev))
6172
		palreg = LGC_PALETTE(intel_crtc->pipe);
6173
 
6174
	for (i = 0; i < 256; i++) {
6175
		I915_WRITE(palreg + 4 * i,
6176
			   (intel_crtc->lut_r[i] << 16) |
6177
			   (intel_crtc->lut_g[i] << 8) |
6178
			   intel_crtc->lut_b[i]);
6179
	}
6180
}
6181
 
6182
 
6183
 
6184
 
6185
 
6186
 
6187
 
6188
 
6189
 
6190
 
6191
 
6192
 
6193
 
6194
 
6195
 
6196
 
6197
 
6198
 
6199
 
6200
 
6201
 
6202
 
6203
 
6204
 
6205
 
6206
 
6207
 
6208
 
6209
 
6210
 
6211
 
6212
 
6213
 
6214
 
6215
 
6216
 
6217
 
2332 Serge 6218
/** Sets the color ramps on behalf of RandR */
6219
void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
6220
				 u16 blue, int regno)
6221
{
6222
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2327 Serge 6223
 
2332 Serge 6224
	intel_crtc->lut_r[regno] = red >> 8;
6225
	intel_crtc->lut_g[regno] = green >> 8;
6226
	intel_crtc->lut_b[regno] = blue >> 8;
6227
}
2327 Serge 6228
 
2332 Serge 6229
/** Reads back the color ramps on behalf of RandR */
void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
			     u16 *blue, int regno)
{
	struct intel_crtc *icrtc = to_intel_crtc(crtc);

	/* Widen the stored 8-bit components back to the 16-bit range
	 * RandR expects (high byte only; low byte left zero). */
	*blue = icrtc->lut_b[regno] << 8;
	*green = icrtc->lut_g[regno] << 8;
	*red = icrtc->lut_r[regno] << 8;
}
2327 Serge 6238
 
2330 Serge 6239
/*
 * DRM gamma_set hook: copy a slice [start, start+size) of the supplied
 * 16-bit ramps into the CRTC's 8-bit LUT and push it to the hardware.
 */
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				 u16 *blue, uint32_t start, uint32_t size)
{
	struct intel_crtc *icrtc = to_intel_crtc(crtc);
	uint32_t last = start + size;
	uint32_t idx;

	/* Clamp to the 256-entry hardware palette. */
	if (last > 256)
		last = 256;

	for (idx = start; idx < last; idx++) {
		icrtc->lut_r[idx] = red[idx] >> 8;
		icrtc->lut_g[idx] = green[idx] >> 8;
		icrtc->lut_b[idx] = blue[idx] >> 8;
	}

	/* Write the updated ramp out to the palette registers. */
	intel_crtc_load_lut(crtc);
}
2327 Serge 6253
 
2330 Serge 6254
/**
6255
 * Get a pipe with a simple mode set on it for doing load-based monitor
6256
 * detection.
6257
 *
6258
 * It will be up to the load-detect code to adjust the pipe as appropriate for
6259
 * its requirements.  The pipe will be connected to no other encoders.
6260
 *
6261
 * Currently this code will only succeed if there is a pipe with no encoders
6262
 * configured for it.  In the future, it could choose to temporarily disable
6263
 * some outputs to free up a pipe for its use.
6264
 *
6265
 * \return crtc, or NULL if no pipes are available.
6266
 */
2327 Serge 6267
 
2330 Serge 6268
/* VESA 640x480x72Hz mode to set on the pipe */
/* Fallback mode for load-based monitor detection, used by
 * intel_get_load_detect_pipe() when the caller supplies no mode. */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
2327 Serge 6273
 
6274
 
6275
 
6276
 
6277
 
2330 Serge 6278
static u32
6279
intel_framebuffer_pitch_for_width(int width, int bpp)
6280
{
6281
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
6282
	return ALIGN(pitch, 64);
6283
}
2327 Serge 6284
 
2330 Serge 6285
static u32
6286
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
6287
{
6288
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
6289
	return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
6290
}
2327 Serge 6291
 
2330 Serge 6292
static struct drm_framebuffer *
6293
intel_framebuffer_create_for_mode(struct drm_device *dev,
6294
				  struct drm_display_mode *mode,
6295
				  int depth, int bpp)
6296
{
6297
	struct drm_i915_gem_object *obj;
2344 Serge 6298
	struct drm_mode_fb_cmd2 mode_cmd;
2327 Serge 6299
 
2330 Serge 6300
//	obj = i915_gem_alloc_object(dev,
6301
//				    intel_framebuffer_size_for_mode(mode, bpp));
6302
//	if (obj == NULL)
6303
		return ERR_PTR(-ENOMEM);
2327 Serge 6304
 
2330 Serge 6305
//	mode_cmd.width = mode->hdisplay;
6306
//	mode_cmd.height = mode->vdisplay;
6307
//	mode_cmd.depth = depth;
6308
//	mode_cmd.bpp = bpp;
6309
//	mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp);
2327 Serge 6310
 
2330 Serge 6311
//	return intel_framebuffer_create(dev, &mode_cmd, obj);
6312
}
2327 Serge 6313
 
2330 Serge 6314
/*
 * Check whether the fbdev framebuffer is large enough to be reused for
 * load detection of @mode; return it if so, NULL otherwise.
 *
 * NOTE(port): fbdev reuse is not wired up in this KolibriOS port yet,
 * so this stub always returns NULL (forcing the caller to allocate a
 * temporary fb).  The original body is kept below, commented out —
 * including its local declarations, so the stub compiles without
 * unused-variable warnings.
 */
static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
		   struct drm_display_mode *mode)
{
//	struct drm_i915_private *dev_priv = dev->dev_private;
//	struct drm_i915_gem_object *obj;
//	struct drm_framebuffer *fb;

//	if (dev_priv->fbdev == NULL)
//		return NULL;

//	obj = dev_priv->fbdev->ifb.obj;
//	if (obj == NULL)
//		return NULL;

//	fb = &dev_priv->fbdev->ifb.base;
//	if (fb->pitch < intel_framebuffer_pitch_for_width(mode->hdisplay,
//							  fb->bits_per_pixel))
		return NULL;

//	if (obj->base.size < mode->vdisplay * fb->pitch)
//		return NULL;

//	return fb;
}
2327 Serge 6339
 
2330 Serge 6340
/*
 * Acquire a pipe for load-based monitor detection and light it up with
 * @mode (or the VESA 640x480 fallback when @mode is NULL).
 *
 * State needed to undo the operation is saved into @old for
 * intel_release_load_detect_pipe().
 *
 * Returns true when a running pipe is available, false otherwise.
 */
bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
				struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old)
{
	struct intel_crtc *intel_crtc;
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *old_fb;
	int i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		intel_crtc = to_intel_crtc(crtc);
		/* load_detect_temp = false: nothing temporary to tear down,
		 * only the saved DPMS state may need restoring on release. */
		old->dpms_mode = intel_crtc->dpms_mode;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
			struct drm_encoder_helper_funcs *encoder_funcs;
			struct drm_crtc_helper_funcs *crtc_funcs;

			crtc_funcs = crtc->helper_private;
			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);

			encoder_funcs = encoder->helper_private;
			encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
		}

		return true;
	}

	/* Find an unused one (if possible) */
	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
		i++;
		/* Skip CRTCs this encoder cannot drive. */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (!possible_crtc->enabled) {
			crtc = possible_crtc;
			break;
		}
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		return false;
	}

	/* Temporarily bind the encoder/connector to the borrowed CRTC. */
	encoder->crtc = crtc;
	connector->encoder = encoder;

	intel_crtc = to_intel_crtc(crtc);
	old->dpms_mode = intel_crtc->dpms_mode;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	if (!mode)
		mode = &load_detect_mode;

	old_fb = crtc->fb;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	crtc->fb = mode_fits_in_fbdev(dev, mode);
	if (crtc->fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
		/* Remember the temporary fb so release can destroy it. */
		old->release_fb = crtc->fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	/* NOTE(port): with the stubbed allocator above this always yields
	 * ERR_PTR(-ENOMEM), so the load-detect path currently fails here. */
	if (IS_ERR(crtc->fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		crtc->fb = old_fb;
		return false;
	}

	if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		crtc->fb = old_fb;
		return false;
	}

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	return true;
}
2327 Serge 6455
 
2330 Serge 6456
/*
 * Undo intel_get_load_detect_pipe(): tear down a temporarily borrowed
 * pipe (and destroy the temporary fb, if one was created), or restore
 * the original DPMS state of a pipe that was already assigned.
 */
void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
				    struct drm_connector *connector,
				    struct intel_load_detect_pipe *old)
{
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_device *dev = encoder->dev;
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	/* Pipe was borrowed: unbind it and let the helper turn off
	 * everything that is now unused. */
	if (old->load_detect_temp) {
		connector->encoder = NULL;
		drm_helper_disable_unused_functions(dev);

		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);

		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON) {
		encoder_funcs->dpms(encoder, old->dpms_mode);
		crtc_funcs->dpms(crtc, old->dpms_mode);
	}
}
2327 Serge 6486
 
2330 Serge 6487
/* Returns the clock of the currently programmed mode of the given pipe.
 *
 * Decodes the DPLL and FP divisor registers back into an intel_clock_t and
 * lets intel_clock() compute the resulting dot clock (in kHz). Returns 0 if
 * the DPLL mode bits are unrecognised.
 */
static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 dpll = I915_READ(DPLL(pipe));
	u32 fp;
	intel_clock_t clock;

	/* FPA1 selects the alternate (downclocked) divisor register. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = I915_READ(FP0(pipe));
	else
		fp = I915_READ(FP1(pipe));

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		/* Pineview encodes N as a one-hot field, hence ffs() - 1. */
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* P1 is stored one-hot on gen3+ as well. */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return 0;
		}

		/* XXX: Handle the 100Mhz refclk */
		intel_clock(dev, 96000, &clock);
	} else {
		/* gen2: LVDS is only ever on pipe B. */
		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
			clock.p2 = 14;

			if ((dpll & PLL_REF_INPUT_MASK) ==
			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
				/* XXX: might not be 66MHz */
				intel_clock(dev, 66000, &clock);
			} else
				intel_clock(dev, 48000, &clock);
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;

			intel_clock(dev, 48000, &clock);
		}
	}

	/* XXX: It would be nice to validate the clocks, but we can't reuse
	 * i830PllIsValid() because it relies on the xf86_config connector
	 * configuration being accurate, which it isn't necessarily.
	 */

	return clock.dot;
}
2327 Serge 6573
 
2330 Serge 6574
/** Returns the currently programmed mode of the given pipe. */
6575
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
6576
					     struct drm_crtc *crtc)
6577
{
6578
	struct drm_i915_private *dev_priv = dev->dev_private;
6579
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6580
	int pipe = intel_crtc->pipe;
6581
	struct drm_display_mode *mode;
6582
	int htot = I915_READ(HTOTAL(pipe));
6583
	int hsync = I915_READ(HSYNC(pipe));
6584
	int vtot = I915_READ(VTOTAL(pipe));
6585
	int vsync = I915_READ(VSYNC(pipe));
2327 Serge 6586
 
2330 Serge 6587
	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
6588
	if (!mode)
6589
		return NULL;
6590
 
6591
	mode->clock = intel_crtc_clock_get(dev, crtc);
6592
	mode->hdisplay = (htot & 0xffff) + 1;
6593
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
6594
	mode->hsync_start = (hsync & 0xffff) + 1;
6595
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
6596
	mode->vdisplay = (vtot & 0xffff) + 1;
6597
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
6598
	mode->vsync_start = (vsync & 0xffff) + 1;
6599
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
6600
 
6601
	drm_mode_set_name(mode);
6602
	drm_mode_set_crtcinfo(mode, 0);
6603
 
6604
	return mode;
6605
}
6606
 
6607
#define GPU_IDLE_TIMEOUT 500 /* ms */
6608
 
6609
 
6610
 
6611
 
6612
#define CRTC_IDLE_TIMEOUT 1000 /* ms */
6613
 
6614
 
6615
 
6616
 
2327 Serge 6617
static void intel_increase_pllclock(struct drm_crtc *crtc)
6618
{
6619
	struct drm_device *dev = crtc->dev;
6620
	drm_i915_private_t *dev_priv = dev->dev_private;
6621
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6622
	int pipe = intel_crtc->pipe;
6623
	int dpll_reg = DPLL(pipe);
6624
	int dpll;
6625
 
2336 Serge 6626
    ENTER();
6627
 
2327 Serge 6628
	if (HAS_PCH_SPLIT(dev))
6629
		return;
6630
 
6631
	if (!dev_priv->lvds_downclock_avail)
6632
		return;
6633
 
6634
	dpll = I915_READ(dpll_reg);
6635
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
6636
		DRM_DEBUG_DRIVER("upclocking LVDS\n");
6637
 
6638
		/* Unlock panel regs */
6639
		I915_WRITE(PP_CONTROL,
6640
			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
6641
 
6642
		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
6643
		I915_WRITE(dpll_reg, dpll);
6644
		intel_wait_for_vblank(dev, pipe);
6645
 
6646
		dpll = I915_READ(dpll_reg);
6647
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
6648
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
6649
 
6650
		/* ...and lock them again */
6651
		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
6652
	}
6653
 
2336 Serge 6654
    LEAVE();
6655
 
2327 Serge 6656
	/* Schedule downclock */
6657
}
6658
 
6659
 
6660
 
6661
 
6662
 
6663
 
6664
 
6665
 
6666
 
6667
 
6668
 
6669
 
6670
 
6671
 
6672
 
6673
 
6674
 
6675
 
6676
 
6677
 
6678
 
6679
 
2330 Serge 6680
static void intel_crtc_destroy(struct drm_crtc *crtc)
6681
{
6682
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6683
	struct drm_device *dev = crtc->dev;
6684
	struct intel_unpin_work *work;
6685
	unsigned long flags;
2327 Serge 6686
 
2330 Serge 6687
	spin_lock_irqsave(&dev->event_lock, flags);
6688
	work = intel_crtc->unpin_work;
6689
	intel_crtc->unpin_work = NULL;
6690
	spin_unlock_irqrestore(&dev->event_lock, flags);
2327 Serge 6691
 
2330 Serge 6692
	if (work) {
6693
//		cancel_work_sync(&work->work);
6694
		kfree(work);
6695
	}
2327 Serge 6696
 
2330 Serge 6697
	drm_crtc_cleanup(crtc);
2327 Serge 6698
 
2330 Serge 6699
	kfree(intel_crtc);
6700
}
2327 Serge 6701
 
6702
 
6703
 
6704
 
6705
 
6706
 
6707
 
6708
 
6709
 
6710
 
6711
 
6712
 
6713
 
6714
 
6715
 
6716
 
6717
 
6718
 
6719
 
6720
 
6721
 
6722
 
6723
 
6724
 
6725
 
6726
 
6727
 
6728
 
6729
 
6730
 
6731
 
6732
 
6733
 
6734
 
6735
 
6736
 
6737
 
6738
 
6739
 
6740
 
6741
 
6742
 
6743
 
6744
 
6745
 
6746
 
6747
 
6748
 
6749
 
6750
 
6751
 
6752
 
6753
 
6754
 
6755
 
6756
 
6757
 
6758
 
6759
 
6760
 
6761
 
6762
 
6763
 
6764
 
6765
 
6766
 
2330 Serge 6767
/* Fix up plane/pipe routing left behind by the BIOS or bootloader so that
 * our later modeset teardown disables planes and pipes in a safe order.
 */
static void intel_sanitize_modesetting(struct drm_device *dev,
				       int pipe, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg, val;

	/* PCH platforms have fixed plane->pipe routing; nothing to sanitize. */
	if (HAS_PCH_SPLIT(dev))
		return;

	/* Who knows what state these registers were left in by the BIOS or
	 * grub?
	 *
	 * If we leave the registers in a conflicting state (e.g. with the
	 * display plane reading from the other pipe than the one we intend
	 * to use) then when we attempt to teardown the active mode, we will
	 * not disable the pipes and planes in the correct order -- leaving
	 * a plane reading from a disabled pipe and possibly leading to
	 * undefined behaviour.
	 */

	reg = DSPCNTR(plane);
	val = I915_READ(reg);

	/* Plane already off, or already feeding from the expected pipe. */
	if ((val & DISPLAY_PLANE_ENABLE) == 0)
		return;
	if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
		return;

	/* This display plane is active and attached to the other CPU pipe. */
	pipe = !pipe;

	/* Disable the plane and wait for it to stop reading from the pipe. */
	intel_disable_plane(dev_priv, plane, pipe);
	intel_disable_pipe(dev_priv, pipe);
}
2327 Serge 6802
 
2330 Serge 6803
static void intel_crtc_reset(struct drm_crtc *crtc)
6804
{
6805
	struct drm_device *dev = crtc->dev;
6806
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2327 Serge 6807
 
2330 Serge 6808
	/* Reset flags back to the 'unknown' status so that they
6809
	 * will be correctly set on the initial modeset.
6810
	 */
6811
	intel_crtc->dpms_mode = -1;
2327 Serge 6812
 
2330 Serge 6813
	/* We need to fix up any BIOS configuration that conflicts with
6814
	 * our expectations.
6815
	 */
6816
	intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
6817
}
2327 Serge 6818
 
2330 Serge 6819
static struct drm_crtc_helper_funcs intel_helper_funcs = {
6820
	.dpms = intel_crtc_dpms,
6821
	.mode_fixup = intel_crtc_mode_fixup,
6822
	.mode_set = intel_crtc_mode_set,
6823
	.mode_set_base = intel_pipe_set_base,
6824
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
6825
	.load_lut = intel_crtc_load_lut,
6826
	.disable = intel_crtc_disable,
6827
};
2327 Serge 6828
 
2330 Serge 6829
static const struct drm_crtc_funcs intel_crtc_funcs = {
6830
	.reset = intel_crtc_reset,
6831
//	.cursor_set = intel_crtc_cursor_set,
6832
//	.cursor_move = intel_crtc_cursor_move,
6833
	.gamma_set = intel_crtc_gamma_set,
6834
	.set_config = drm_crtc_helper_set_config,
6835
	.destroy = intel_crtc_destroy,
6836
//	.page_flip = intel_crtc_page_flip,
6837
};
2327 Serge 6838
 
2330 Serge 6839
/* Allocate and register the intel_crtc for the given pipe: identity gamma
 * LUT, pipe<->plane mapping (swapped on mobile gen3 for FBC), and the
 * per-platform prepare/commit helper hooks.
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	int i;

	/* Trailing space for the connector array used by the fb helper. */
	intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);

	/* Start with an identity gamma ramp. */
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
	for (i = 0; i < 256; i++) {
		intel_crtc->lut_r[i] = i;
		intel_crtc->lut_g[i] = i;
		intel_crtc->lut_b[i] = i;
	}

	/* Swap pipes & planes for FBC on pre-965 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (IS_MOBILE(dev) && IS_GEN3(dev)) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	intel_crtc_reset(&intel_crtc->base);
	intel_crtc->active = true; /* force the pipe off on setup_init_config */
	intel_crtc->bpp = 24; /* default for pre-Ironlake */

	/* Patch the shared helper vtable with per-platform prepare/commit.
	 * IVB pipe C has no dedicated PLL. */
	if (HAS_PCH_SPLIT(dev)) {
		if (pipe == 2 && IS_IVYBRIDGE(dev))
			intel_crtc->no_pll = true;
		intel_helper_funcs.prepare = ironlake_crtc_prepare;
		intel_helper_funcs.commit = ironlake_crtc_commit;
	} else {
		intel_helper_funcs.prepare = i9xx_crtc_prepare;
		intel_helper_funcs.commit = i9xx_crtc_commit;
	}

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_crtc->busy = false;

}
2327 Serge 6890
 
6891
 
6892
 
6893
 
6894
 
6895
 
6896
 
2330 Serge 6897
static int intel_encoder_clones(struct drm_device *dev, int type_mask)
6898
{
6899
	struct intel_encoder *encoder;
6900
	int index_mask = 0;
6901
	int entry = 0;
2327 Serge 6902
 
2330 Serge 6903
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
6904
		if (type_mask & encoder->clone_mask)
6905
			index_mask |= (1 << entry);
6906
		entry++;
6907
	}
2327 Serge 6908
 
2330 Serge 6909
	return index_mask;
6910
}
2327 Serge 6911
 
2330 Serge 6912
static bool has_edp_a(struct drm_device *dev)
6913
{
6914
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 6915
 
2330 Serge 6916
	if (!IS_MOBILE(dev))
6917
		return false;
2327 Serge 6918
 
2330 Serge 6919
	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
6920
		return false;
2327 Serge 6921
 
2330 Serge 6922
	if (IS_GEN5(dev) &&
6923
	    (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
6924
		return false;
2327 Serge 6925
 
2330 Serge 6926
	return true;
6927
}
2327 Serge 6928
 
2330 Serge 6929
/* Probe and register every display output on the device, then compute the
 * possible_crtcs/possible_clones masks for each encoder. The probe order
 * (LVDS/eDP first, then CRT, then digital ports) is deliberate.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;
	bool has_lvds = false;

    ENTER();

	if (IS_MOBILE(dev) && !IS_I830(dev))
		has_lvds = intel_lvds_init(dev);
	if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
		/* disable the panel fitter on everything but LVDS */
		I915_WRITE(PFIT_CONTROL, 0);
	}

	/* eDP ports must be registered before CRT so they claim their pipe. */
	if (HAS_PCH_SPLIT(dev)) {
		dpd_is_edp = intel_dpd_is_edp(dev);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A);

		if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
			intel_dp_init(dev, PCH_DP_D);
	}

	intel_crt_init(dev);

	if (HAS_PCH_SPLIT(dev)) {
		int found;

		if (I915_READ(HDMIB) & PORT_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB);
			if (!found)
				intel_hdmi_init(dev, HDMIB);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B);
		}

		if (I915_READ(HDMIC) & PORT_DETECTED)
			intel_hdmi_init(dev, HDMIC);

		if (I915_READ(HDMID) & PORT_DETECTED)
			intel_hdmi_init(dev, HDMID);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C);

		/* Port D was already registered above if it is eDP. */
		if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
			intel_dp_init(dev, PCH_DP_D);

	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
		bool found = false;

		if (I915_READ(SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, SDVOB);
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, SDVOB);
			}

			if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
				DRM_DEBUG_KMS("probing DP_B\n");
				intel_dp_init(dev, DP_B);
			}
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, SDVOC);
		}

		if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {

			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, SDVOC);
			}
			if (SUPPORTS_INTEGRATED_DP(dev)) {
				DRM_DEBUG_KMS("probing DP_C\n");
				intel_dp_init(dev, DP_C);
			}
		}

		if (SUPPORTS_INTEGRATED_DP(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED)) {
			DRM_DEBUG_KMS("probing DP_D\n");
			intel_dp_init(dev, DP_D);
		}
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);

	/* TV probing is stubbed out in this port. */
//   if (SUPPORTS_TV(dev))
//       intel_tv_init(dev);

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(dev, encoder->clone_mask);
	}

	/* disable all the possible outputs/crtcs before entering KMS mode */
//	drm_helper_disable_unused_functions(dev);

	if (HAS_PCH_SPLIT(dev))
		ironlake_init_pch_refclk(dev);

    LEAVE();
}
7042
 
7043
 
7044
 
7045
 
2327 Serge 7046
static const struct drm_mode_config_funcs intel_mode_funcs = {
7047
	.fb_create = NULL /*intel_user_framebuffer_create*/,
7048
	.output_poll_changed = NULL /*intel_fb_output_poll_changed*/,
7049
};
7050
 
7051
 
7052
 
7053
 
2335 Serge 7054
/* Framebuffer vtable; destroy/create_handle callbacks are stubbed out in
 * this port (kept as comments for reference). */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
//	.destroy = intel_user_framebuffer_destroy,
//	.create_handle = intel_user_framebuffer_create_handle,
};
2327 Serge 7058
 
2335 Serge 7059
int intel_framebuffer_init(struct drm_device *dev,
7060
			   struct intel_framebuffer *intel_fb,
2342 Serge 7061
			   struct drm_mode_fb_cmd2 *mode_cmd,
2335 Serge 7062
			   struct drm_i915_gem_object *obj)
7063
{
7064
	int ret;
2327 Serge 7065
 
2335 Serge 7066
	if (obj->tiling_mode == I915_TILING_Y)
7067
		return -EINVAL;
2327 Serge 7068
 
2342 Serge 7069
	if (mode_cmd->pitches[0] & 63)
2335 Serge 7070
			return -EINVAL;
2327 Serge 7071
 
2342 Serge 7072
	switch (mode_cmd->pixel_format) {
7073
	case DRM_FORMAT_RGB332:
7074
	case DRM_FORMAT_RGB565:
7075
	case DRM_FORMAT_XRGB8888:
7076
	case DRM_FORMAT_ARGB8888:
7077
	case DRM_FORMAT_XRGB2101010:
7078
	case DRM_FORMAT_ARGB2101010:
7079
		/* RGB formats are common across chipsets */
2335 Serge 7080
		break;
2342 Serge 7081
	case DRM_FORMAT_YUYV:
7082
	case DRM_FORMAT_UYVY:
7083
	case DRM_FORMAT_YVYU:
7084
	case DRM_FORMAT_VYUY:
7085
		break;
2335 Serge 7086
	default:
2342 Serge 7087
		DRM_ERROR("unsupported pixel format\n");
2335 Serge 7088
		return -EINVAL;
7089
	}
2327 Serge 7090
 
2335 Serge 7091
	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
7092
	if (ret) {
7093
		DRM_ERROR("framebuffer init failed %d\n", ret);
7094
		return ret;
7095
	}
2327 Serge 7096
 
2335 Serge 7097
	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
7098
	intel_fb->obj = obj;
7099
	return 0;
7100
}
2327 Serge 7101
 
7102
 
7103
 
7104
 
7105
 
7106
 
7107
 
7108
 
7109
 
7110
 
7111
 
7112
 
2330 Serge 7113
/* Request a new render P-state (val) via the MEMSWCTL mailbox.
 * Returns false if the previous command is still pending, true once the
 * new request has been submitted.
 */
bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	/* Submit the frequency-change command. */
	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	/* Setting the status bit kicks off execution of the command. */
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}
2327 Serge 7134
 
2330 Serge 7135
/* Enable Ironlake dynamic render P-state (DRPS) support: program the RC
 * evaluation intervals and thresholds, derive fmin/fmax/fstart from the
 * MEMMODECTL fuses, enable software frequency control and switch to the
 * start frequency.
 */
void ironlake_enable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL);
	u8 fmax, fmin, fstart, vstart;

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	/* Voltage for the start frequency, from the PXVFREQ table. */
	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->fmax = fmax; /* IPS callback will increase this */
	dev_priv->fstart = fstart;

	dev_priv->max_delay = fstart;
	dev_priv->min_delay = fmin;
	dev_priv->cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	/* Switch MEMMODE to software frequency control. */
	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	msleep(1);

	ironlake_set_drps(dev, fstart);

	/* Seed the energy/busyness counters used by the IPS code.
	 * NOTE(review): timestamp sampling is stubbed out in this port. */
	dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
		I915_READ(0x112e0);
//   dev_priv->last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->last_count2 = I915_READ(0x112f4);
//   getrawmonotonic(&dev_priv->last_time2);
}
2327 Serge 7198
 
7199
 
7200
 
7201
 
7202
 
7203
 
7204
 
7205
 
7206
 
7207
 
7208
 
7209
 
2330 Serge 7210
/* Decode a PXVFREQ register value into a frequency (kHz):
 *   bits 16-21: divider, bits 12-13: post divider exponent, bits 0-2: pre
 * divider. freq = div * 133333 / (2^post * pre); returns 0 when the pre
 * divider field is zero (entry unused), avoiding a division by zero.
 *
 * Fix: the divisor expression was garbled in the source; restored to the
 * documented ((1 << post) * pre) form.
 */
static unsigned long intel_pxfreq(u32 vidfreq)
{
	unsigned long freq;
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	freq = ((div * 133333) / ((1 << post) * pre));

	return freq;
}
2327 Serge 7224
 
2330 Serge 7225
/* Program the Ironlake energy monitor (EMON): event weights, per-P-state
 * power weights derived from the PXVFREQ table, and the magic calibration
 * registers, then enable PMON and cache the LCFUSE correction factor.
 */
void intel_init_emon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW + (i * 4), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW + (i * 4), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		/* Weight ~ V^2 * f, scaled to fit a byte. */
		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	/* Pack the 16 byte weights into four 32-bit registers. */
	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW + (i * 4), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL + (i * 4), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
7295
 
2342 Serge 7296
static bool intel_enable_rc6(struct drm_device *dev)
7297
{
7298
	/*
7299
	 * Respect the kernel parameter if it is set
7300
	 */
7301
	if (i915_enable_rc6 >= 0)
7302
		return i915_enable_rc6;
7303
 
7304
	/*
7305
	 * Disable RC6 on Ironlake
7306
	 */
7307
	if (INTEL_INFO(dev)->gen == 5)
7308
		return 0;
7309
 
7310
	/*
7311
	 * Disable rc6 on Sandybridge
7312
	 */
7313
	if (INTEL_INFO(dev)->gen == 6) {
7314
		DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n");
7315
		return 0;
7316
	}
7317
	DRM_DEBUG_DRIVER("RC6 enabled\n");
7318
	return 1;
7319
}
7320
 
2330 Serge 7321
/* Bring up gen6 render P-state (RPS) control and RC6: program RC sleep
 * thresholds, enable turbo via the GEN6_RP registers, negotiate min/max
 * frequency with the PCU mailbox (including overclock detection) and unmask
 * the PM interrupts. Must run with forcewake held and struct_mutex locked.
 */
void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
	u32 pcu_mbox, rc6_mask = 0;
	int cur_freq, min_freq, max_freq;
	int i;

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	I915_WRITE(GEN6_RC_STATE, 0);
	mutex_lock(&dev_priv->dev->struct_mutex);
	gen6_gt_force_wake_get(dev_priv);

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	/* Per-ring idle limits before RC transitions. */
	for (i = 0; i < I915_NUM_RINGS; i++)
		I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	if (intel_enable_rc6(dev_priv->dev))
		rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
			GEN6_RC_CTL_RC6_ENABLE;

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	I915_WRITE(GEN6_RPNSWREQ,
		   GEN6_FREQUENCY(10) |
		   GEN6_OFFSET(0) |
		   GEN6_AGGRESSIVE_TURBO);
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   GEN6_FREQUENCY(12));

	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   18 << 24 |
		   6 << 16);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
	I915_WRITE(GEN6_RP_UP_EI, 100000);
	I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	/* Push the minimum-frequency table to the PCU via the mailbox. */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");

	I915_WRITE(GEN6_PCODE_DATA, 0);
	I915_WRITE(GEN6_PCODE_MAILBOX,
		   GEN6_PCODE_READY |
		   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");

	/* Hardware capability / current frequency, in units of 50 MHz. */
	min_freq = (rp_state_cap & 0xff0000) >> 16;
	max_freq = rp_state_cap & 0xff;
	cur_freq = (gt_perf_status & 0xff00) >> 8;

	/* Check for overclock support */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
	pcu_mbox = I915_READ(GEN6_PCODE_DATA);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
	if (pcu_mbox & (1<<31)) { /* OC supported */
		max_freq = pcu_mbox & 0xff;
		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
	}

	/* In units of 100MHz */
	dev_priv->max_delay = max_freq;
	dev_priv->min_delay = min_freq;
	dev_priv->cur_delay = cur_freq;

	/* requires MSI enabled */
	I915_WRITE(GEN6_PMIER,
		   GEN6_PM_MBOX_EVENT |
		   GEN6_PM_THERMAL_EVENT |
		   GEN6_PM_RP_DOWN_TIMEOUT |
		   GEN6_PM_RP_UP_THRESHOLD |
		   GEN6_PM_RP_DOWN_THRESHOLD |
		   GEN6_PM_RP_UP_EI_EXPIRED |
		   GEN6_PM_RP_DOWN_EI_EXPIRED);
	/* NOTE(review): rps_lock locking is stubbed out in this port. */
//   spin_lock_irq(&dev_priv->rps_lock);
//   WARN_ON(dev_priv->pm_iir != 0);
	I915_WRITE(GEN6_PMIMR, 0);
//   spin_unlock_irq(&dev_priv->rps_lock);
	/* enable all PM interrupts */
	I915_WRITE(GEN6_PMINTRMSK, 0);

	gen6_gt_force_wake_put(dev_priv);
	mutex_unlock(&dev_priv->dev->struct_mutex);
}
7444
 
7445
/* Program the PCU's GPU-frequency -> ring-frequency table: for every GPU
 * frequency between max_delay and min_delay, pick an IA reference frequency
 * so memory bandwidth scales with GPU load.
 */
void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
	int min_freq = 15;
	int gpu_freq, ia_freq, max_ia_freq;
	int scaling_factor = 180;

	/* NOTE(review): cpufreq query is stubbed out in this port, so the
	 * measured-frequency fallback below is always used. */
//   max_ia_freq = cpufreq_quick_get_max(0);
	/*
	 * Default to measured freq if none found, PCU will ensure we don't go
	 * over
	 */
//   if (!max_ia_freq)
		max_ia_freq = 3000000; //tsc_khz;

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	mutex_lock(&dev_priv->dev->struct_mutex);

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access.  We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
	     gpu_freq--) {
		int diff = dev_priv->max_delay - gpu_freq;

		/*
		 * For GPU frequencies less than 750MHz, just use the lowest
		 * ring freq.
		 */
		if (gpu_freq < min_freq)
			ia_freq = 800;
		else
			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
		ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);

		I915_WRITE(GEN6_PCODE_DATA,
			   (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
			   gpu_freq);
		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
			   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
		/* Best-effort: log and continue with the next entry on timeout. */
		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
			      GEN6_PCODE_READY) == 0, 10)) {
			DRM_ERROR("pcode write of freq table timed out\n");
			continue;
		}
	}

	mutex_unlock(&dev_priv->dev->struct_mutex);
}
7497
 
2327 Serge 7498
/*
 * Ironlake (gen5) clock gating setup: disable the unit clock gates that
 * FBC, CxSR and memory self-refresh depend on, clear the LP watermarks,
 * and apply the chicken-bit workarounds documented inline below.
 * Installed as dev_priv->display.init_clock_gating for gen5.
 */
static void ironlake_init_clock_gating(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

    /* Required for FBC */
    dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
        DPFCRUNIT_CLOCK_GATE_DISABLE |
        DPFDUNIT_CLOCK_GATE_DISABLE;
    /* Required for CxSR */
    dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;

    I915_WRITE(PCH_3DCGDIS0,
           MARIUNIT_CLOCK_GATE_DISABLE |
           SVSMUNIT_CLOCK_GATE_DISABLE);
    I915_WRITE(PCH_3DCGDIS1,
           VFMUNIT_CLOCK_GATE_DISABLE);

    I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

    /*
     * According to the spec the following bits should be set in
     * order to enable memory self-refresh
     * The bit 22/21 of 0x42004
     * The bit 5 of 0x42020
     * The bit 15 of 0x45000
     */
    I915_WRITE(ILK_DISPLAY_CHICKEN2,
           (I915_READ(ILK_DISPLAY_CHICKEN2) |
            ILK_DPARB_GATE | ILK_VSDPFD_FULL));
    I915_WRITE(ILK_DSPCLK_GATE,
           (I915_READ(ILK_DSPCLK_GATE) |
            ILK_DPARB_CLK_GATE));
    I915_WRITE(DISP_ARB_CTL,
           (I915_READ(DISP_ARB_CTL) |
            DISP_FBC_WM_DIS));
    /* Clear all low-power watermarks; update_wm reprograms them later. */
    I915_WRITE(WM3_LP_ILK, 0);
    I915_WRITE(WM2_LP_ILK, 0);
    I915_WRITE(WM1_LP_ILK, 0);

    /*
     * Based on the document from hardware guys the following bits
     * should be set unconditionally in order to enable FBC.
     * The bit 22 of 0x42000
     * The bit 22 of 0x42004
     * The bit 7,8,9 of 0x42020.
     */
    if (IS_IRONLAKE_M(dev)) {
        I915_WRITE(ILK_DISPLAY_CHICKEN1,
               I915_READ(ILK_DISPLAY_CHICKEN1) |
               ILK_FBCQ_DIS);
        I915_WRITE(ILK_DISPLAY_CHICKEN2,
               I915_READ(ILK_DISPLAY_CHICKEN2) |
               ILK_DPARB_GATE);
        I915_WRITE(ILK_DSPCLK_GATE,
               I915_READ(ILK_DSPCLK_GATE) |
               ILK_DPFC_DIS1 |
               ILK_DPFC_DIS2 |
               ILK_CLK_FBC);
    }

    I915_WRITE(ILK_DISPLAY_CHICKEN2,
           I915_READ(ILK_DISPLAY_CHICKEN2) |
           ILK_ELPIN_409_SELECT);
    I915_WRITE(_3D_CHICKEN2,
           _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
           _3D_CHICKEN2_WM_READ_PIPELINED);
}
7566
 
7567
/*
 * Sandy Bridge (gen6) clock gating setup: chicken-bit workarounds for
 * render corruption, self-refresh/FBC enable bits, and trickle-feed
 * disable on every display plane.  Installed as
 * dev_priv->display.init_clock_gating for gen6.
 */
static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* Clear all low-power watermarks; update_wm reprograms them later. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set.  Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE,
		   I915_READ(ILK_DSPCLK_GATE) |
		   ILK_DPARB_CLK_GATE  |
		   ILK_DPFD_CLK_GATE);

	/* Bit 14 of DSPCNTR on each pipe: disable plane trickle feed. */
	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}
7624
 
7625
/*
 * Ivy Bridge (gen7) clock gating setup: clear LP watermarks, apply the
 * VRHUNIT and CHICKEN3 workarounds, and disable trickle feed on every
 * display plane.  Installed as dev_priv->display.init_clock_gating for IVB.
 */
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	/* Clear all low-power watermarks; update_wm reprograms them later. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);

	/* Workaround: disable the DGMG request/done fixes (chicken bits). */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* Bit 14 of DSPCNTR on each pipe: disable plane trickle feed. */
	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}
7650
 
7651
static void g4x_init_clock_gating(struct drm_device *dev)
7652
{
7653
    struct drm_i915_private *dev_priv = dev->dev_private;
7654
    uint32_t dspclk_gate;
7655
 
7656
    I915_WRITE(RENCLK_GATE_D1, 0);
7657
    I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
7658
           GS_UNIT_CLOCK_GATE_DISABLE |
7659
           CL_UNIT_CLOCK_GATE_DISABLE);
7660
    I915_WRITE(RAMCLK_GATE_D, 0);
7661
    dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
7662
        OVRUNIT_CLOCK_GATE_DISABLE |
7663
        OVCUNIT_CLOCK_GATE_DISABLE;
7664
    if (IS_GM45(dev))
7665
        dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
7666
    I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
7667
}
7668
 
7669
/*
 * Crestline (965GM) clock gating setup: keep RCC ungated, enable full
 * gating everywhere else, and clear the display early-unload count (DEUC).
 */
static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	/* DEUC is a 16-bit register, hence the WRITE16 variant. */
	I915_WRITE16(DEUC, 0);
}
7679
 
7680
static void broadwater_init_clock_gating(struct drm_device *dev)
7681
{
7682
	struct drm_i915_private *dev_priv = dev->dev_private;
7683
 
7684
	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
7685
		   I965_RCC_CLOCK_GATE_DISABLE |
7686
		   I965_RCPB_CLOCK_GATE_DISABLE |
7687
		   I965_ISC_CLOCK_GATE_DISABLE |
7688
		   I965_FBC_CLOCK_GATE_DISABLE);
7689
	I915_WRITE(RENCLK_GATE_D2, 0);
7690
}
7691
 
7692
static void gen3_init_clock_gating(struct drm_device *dev)
7693
{
7694
    struct drm_i915_private *dev_priv = dev->dev_private;
7695
    u32 dstate = I915_READ(D_STATE);
7696
 
7697
    dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
7698
        DSTATE_DOT_CLOCK_GATING;
7699
    I915_WRITE(D_STATE, dstate);
7700
}
7701
 
7702
/* 85x clock gating setup: only the SV unit needs its clock gate disabled. */
static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
}
7708
 
7709
/* 830 clock gating setup: only the overlay unit clock gate is disabled. */
static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
7715
 
7716
/*
 * Ibex Peak PCH clock gating setup.  Installed as
 * dev_priv->display.init_pch_clock_gating when HAS_PCH_IBX().
 */
static void ibx_init_clock_gating(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    /*
     * On Ibex Peak and Cougar Point, we need to disable clock
     * gating for the panel power sequencer or it will fail to
     * start up when no ports are active.
     */
    I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
7727
 
7728
/*
 * Cougar Point PCH clock gating setup.  Installed as
 * dev_priv->display.init_pch_clock_gating when HAS_PCH_CPT().
 */
static void cpt_init_clock_gating(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    int pipe;

    /*
     * On Ibex Peak and Cougar Point, we need to disable clock
     * gating for the panel power sequencer or it will fail to
     * start up when no ports are active.
     */
    I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
    I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
           DPLS_EDP_PPS_FIX_DIS);
    /* Without this, mode sets may fail silently on FDI */
    for_each_pipe(pipe)
        I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
}
7745
 
2332 Serge 7746
/*
 * Drop the RC6 render and power context pointers.
 *
 * NOTE(port): the unpin/unreference calls are commented out in this port,
 * so the underlying GEM objects are only forgotten here, not released --
 * TODO confirm they are freed elsewhere (or never allocated, see
 * ironlake_setup_rc6()).
 */
static void ironlake_teardown_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->renderctx) {
//		i915_gem_object_unpin(dev_priv->renderctx);
//		drm_gem_object_unreference(&dev_priv->renderctx->base);
		dev_priv->renderctx = NULL;
	}

	if (dev_priv->pwrctx) {
//		i915_gem_object_unpin(dev_priv->pwrctx);
//		drm_gem_object_unreference(&dev_priv->pwrctx->base);
		dev_priv->pwrctx = NULL;
	}
}
2327 Serge 7762
 
2339 Serge 7763
/*
 * Disable RC6 on Ironlake if it was enabled (PWRCTXA non-zero): wake the
 * GPU out of RC6, wait for it to report "on", clear the power context
 * register, restore RSTDBYCTL, then tear down the context pointers.
 * The write/wait/clear/restore order below is deliberate -- do not reorder.
 */
static void ironlake_disable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_READ(PWRCTXA)) {
		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
		/* Poll up to 50ms for the render standby unit to report ON. */
		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
			 50);

		I915_WRITE(PWRCTXA, 0);
		POSTING_READ(PWRCTXA);

		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
		POSTING_READ(RSTDBYCTL);
	}

	ironlake_teardown_rc6(dev);
}
2332 Serge 7782
 
7783
static int ironlake_setup_rc6(struct drm_device *dev)
7784
{
7785
	struct drm_i915_private *dev_priv = dev->dev_private;
7786
 
7787
	if (dev_priv->renderctx == NULL)
7788
//		dev_priv->renderctx = intel_alloc_context_page(dev);
7789
	if (!dev_priv->renderctx)
7790
		return -ENOMEM;
7791
 
7792
	if (dev_priv->pwrctx == NULL)
7793
//		dev_priv->pwrctx = intel_alloc_context_page(dev);
7794
	if (!dev_priv->pwrctx) {
7795
		ironlake_teardown_rc6(dev);
7796
		return -ENOMEM;
7797
	}
7798
 
7799
	return 0;
7800
}
7801
 
7802
/*
 * Enable RC6 render-unit power-down on Ironlake: set up the context pages,
 * then point PWRCTXA at the power context and clear RCX_SW_EXIT.
 *
 * NOTE(port): the ring-buffer sequence that saves render state via
 * MI_SET_CONTEXT is compiled out (#if 0); only the final register writes
 * run.  In practice ironlake_setup_rc6() currently fails (allocation is
 * disabled there), so this function returns early and RC6 stays off.
 */
void ironlake_enable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* rc6 disabled by default due to repeated reports of hanging during
	 * boot and resume.
	 */
	if (!intel_enable_rc6(dev))
		return;

	mutex_lock(&dev->struct_mutex);
	ret = ironlake_setup_rc6(dev);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return;
	}

	/*
	 * GPU can automatically power down the render unit if given a page
	 * to save state.
	 */
#if 0
	ret = BEGIN_LP_RING(6);
	if (ret) {
		ironlake_teardown_rc6(dev);
		mutex_unlock(&dev->struct_mutex);
		return;
	}

	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
	OUT_RING(MI_SET_CONTEXT);
	OUT_RING(dev_priv->renderctx->gtt_offset |
		 MI_MM_SPACE_GTT |
		 MI_SAVE_EXT_STATE_EN |
		 MI_RESTORE_EXT_STATE_EN |
		 MI_RESTORE_INHIBIT);
	OUT_RING(MI_SUSPEND_FLUSH);
	OUT_RING(MI_NOOP);
	OUT_RING(MI_FLUSH);
	ADVANCE_LP_RING();

	/*
	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
	 * does an implicit flush, combined with MI_FLUSH above, it should be
	 * safe to assume that renderctx is valid
	 */
	ret = intel_wait_ring_idle(LP_RING(dev_priv));
	if (ret) {
		DRM_ERROR("failed to enable ironlake power power savings\n");
		ironlake_teardown_rc6(dev);
		mutex_unlock(&dev->struct_mutex);
		return;
	}
#endif

	/* pwrctx is non-NULL here: ironlake_setup_rc6() succeeded above. */
	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
	mutex_unlock(&dev->struct_mutex);
}
7862
 
2330 Serge 7863
/*
 * Run the chipset-specific clock gating setup chosen by
 * intel_init_display(), plus the PCH-side hook when one was installed.
 * init_clock_gating is assumed non-NULL (always set in intel_init_display).
 */
void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->display.init_clock_gating(dev);

	if (dev_priv->display.init_pch_clock_gating)
		dev_priv->display.init_pch_clock_gating(dev);
}
7872
 
2327 Serge 7873
/* Set up chip specific display functions */
7874
/*
 * Populate dev_priv->display with the function pointers appropriate for
 * this chipset: modeset/DPMS entry points, FBC hooks, display clock
 * lookup, forcewake accessors (gen6+), watermark updaters and clock
 * gating init.  Must run before intel_init_clock_gating() or any modeset.
 * The else-if ordering of the platform checks below is significant.
 */
static void intel_init_display(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    /* We always want a DPMS function */
    if (HAS_PCH_SPLIT(dev)) {
        dev_priv->display.dpms = ironlake_crtc_dpms;
        dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
        dev_priv->display.update_plane = ironlake_update_plane;
    } else {
        dev_priv->display.dpms = i9xx_crtc_dpms;
        dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
        dev_priv->display.update_plane = i9xx_update_plane;
    }

    /* Frame-buffer compression hooks, per platform generation. */
    if (I915_HAS_FBC(dev)) {
        if (HAS_PCH_SPLIT(dev)) {
            dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
            dev_priv->display.enable_fbc = ironlake_enable_fbc;
            dev_priv->display.disable_fbc = ironlake_disable_fbc;
        } else if (IS_GM45(dev)) {
            dev_priv->display.fbc_enabled = g4x_fbc_enabled;
            dev_priv->display.enable_fbc = g4x_enable_fbc;
            dev_priv->display.disable_fbc = g4x_disable_fbc;
        } else if (IS_CRESTLINE(dev)) {
            dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
            dev_priv->display.enable_fbc = i8xx_enable_fbc;
            dev_priv->display.disable_fbc = i8xx_disable_fbc;
        }
        /* 855GM needs testing */
    }

    /* Returns the core display clock speed */
	if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
        dev_priv->display.get_display_clock_speed =
            i945_get_display_clock_speed;
    else if (IS_I915G(dev))
        dev_priv->display.get_display_clock_speed =
            i915_get_display_clock_speed;
    else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
        dev_priv->display.get_display_clock_speed =
            i9xx_misc_get_display_clock_speed;
    else if (IS_I915GM(dev))
        dev_priv->display.get_display_clock_speed =
            i915gm_get_display_clock_speed;
    else if (IS_I865G(dev))
        dev_priv->display.get_display_clock_speed =
            i865_get_display_clock_speed;
    else if (IS_I85X(dev))
        dev_priv->display.get_display_clock_speed =
            i855_get_display_clock_speed;
    else /* 852, 830 */
        dev_priv->display.get_display_clock_speed =
            i830_get_display_clock_speed;

    /* For FIFO watermark updates */
    if (HAS_PCH_SPLIT(dev)) {
		dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
		dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;

		/* IVB configs may use multi-threaded forcewake */
		if (IS_IVYBRIDGE(dev)) {
			u32	ecobus;

			/* A small trick here - if the bios hasn't configured MT forcewake,
			 * and if the device is in RC6, then force_wake_mt_get will not wake
			 * the device and the ECOBUS read will return zero. Which will be
			 * (correctly) interpreted by the test below as MT forcewake being
			 * disabled.
			 */
			mutex_lock(&dev->struct_mutex);
			__gen6_gt_force_wake_mt_get(dev_priv);
			ecobus = I915_READ_NOTRACE(ECOBUS);
			__gen6_gt_force_wake_mt_put(dev_priv);
			mutex_unlock(&dev->struct_mutex);

			if (ecobus & FORCEWAKE_MT_ENABLE) {
				DRM_DEBUG_KMS("Using MT version of forcewake\n");
				dev_priv->display.force_wake_get =
					__gen6_gt_force_wake_mt_get;
				dev_priv->display.force_wake_put =
					__gen6_gt_force_wake_mt_put;
			}
		}

        if (HAS_PCH_IBX(dev))
            dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
        else if (HAS_PCH_CPT(dev))
            dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;

        if (IS_GEN5(dev)) {
            if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
                dev_priv->display.update_wm = ironlake_update_wm;
            else {
                DRM_DEBUG_KMS("Failed to get proper latency. "
                          "Disable CxSR\n");
                dev_priv->display.update_wm = NULL;
            }
            dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
            dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
			dev_priv->display.write_eld = ironlake_write_eld;
        } else if (IS_GEN6(dev)) {
            if (SNB_READ_WM0_LATENCY()) {
                dev_priv->display.update_wm = sandybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
            } else {
                DRM_DEBUG_KMS("Failed to read display plane latency. "
                          "Disable CxSR\n");
                dev_priv->display.update_wm = NULL;
            }
            dev_priv->display.fdi_link_train = gen6_fdi_link_train;
            dev_priv->display.init_clock_gating = gen6_init_clock_gating;
			dev_priv->display.write_eld = ironlake_write_eld;
        } else if (IS_IVYBRIDGE(dev)) {
            /* FIXME: detect B0+ stepping and use auto training */
            dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
            if (SNB_READ_WM0_LATENCY()) {
                dev_priv->display.update_wm = sandybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
            } else {
                DRM_DEBUG_KMS("Failed to read display plane latency. "
                          "Disable CxSR\n");
                dev_priv->display.update_wm = NULL;
            }
            dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
			dev_priv->display.write_eld = ironlake_write_eld;
        } else
            dev_priv->display.update_wm = NULL;
    } else if (IS_PINEVIEW(dev)) {
        if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
                        dev_priv->is_ddr3,
                        dev_priv->fsb_freq,
                        dev_priv->mem_freq)) {
            DRM_INFO("failed to find known CxSR latency "
                 "(found ddr%s fsb freq %d, mem freq %d), "
                 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
                 dev_priv->fsb_freq, dev_priv->mem_freq);
            /* Disable CxSR and never update its watermark again */
            pineview_disable_cxsr(dev);
            dev_priv->display.update_wm = NULL;
        } else
            dev_priv->display.update_wm = pineview_update_wm;
        dev_priv->display.init_clock_gating = gen3_init_clock_gating;
    } else if (IS_G4X(dev)) {
		dev_priv->display.write_eld = g4x_write_eld;
        dev_priv->display.update_wm = g4x_update_wm;
        dev_priv->display.init_clock_gating = g4x_init_clock_gating;
    } else if (IS_GEN4(dev)) {
        dev_priv->display.update_wm = i965_update_wm;
        if (IS_CRESTLINE(dev))
            dev_priv->display.init_clock_gating = crestline_init_clock_gating;
        else if (IS_BROADWATER(dev))
            dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
    } else if (IS_GEN3(dev)) {
        dev_priv->display.update_wm = i9xx_update_wm;
        dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
        dev_priv->display.init_clock_gating = gen3_init_clock_gating;
    } else if (IS_I865G(dev)) {
        dev_priv->display.update_wm = i830_update_wm;
        dev_priv->display.init_clock_gating = i85x_init_clock_gating;
        dev_priv->display.get_fifo_size = i830_get_fifo_size;
    } else if (IS_I85X(dev)) {
        dev_priv->display.update_wm = i9xx_update_wm;
        dev_priv->display.get_fifo_size = i85x_get_fifo_size;
        dev_priv->display.init_clock_gating = i85x_init_clock_gating;
    } else {
        dev_priv->display.update_wm = i830_update_wm;
        dev_priv->display.init_clock_gating = i830_init_clock_gating;
        if (IS_845G(dev))
            dev_priv->display.get_fifo_size = i845_get_fifo_size;
        else
            dev_priv->display.get_fifo_size = i830_get_fifo_size;
    }

    /* Default just returns -ENODEV to indicate unsupported */
//    dev_priv->display.queue_flip = intel_default_queue_flip;

    /* NOTE(port): page-flip queueing is disabled in this port. */
#if 0
    switch (INTEL_INFO(dev)->gen) {
    case 2:
        dev_priv->display.queue_flip = intel_gen2_queue_flip;
        break;

    case 3:
        dev_priv->display.queue_flip = intel_gen3_queue_flip;
        break;

    case 4:
    case 5:
        dev_priv->display.queue_flip = intel_gen4_queue_flip;
        break;

    case 6:
        dev_priv->display.queue_flip = intel_gen6_queue_flip;
        break;
    case 7:
        dev_priv->display.queue_flip = intel_gen7_queue_flip;
        break;
    }
#endif
}
8076
 
8077
/*
8078
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
8079
 * resume, or other times.  This quirk makes sure that's the case for
8080
 * affected systems.
8081
 */
2342 Serge 8082
/* Quirk hook: mark this device as needing pipe A forced on at all times. */
static void quirk_pipea_force(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    dev_priv->quirks |= QUIRK_PIPEA_FORCE;
    DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
}
8089
 
8090
/*
8091
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
8092
 */
8093
/* Quirk hook: disable LVDS spread-spectrum clocking on this device. */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
}
8098
 
8099
/* One machine-specific workaround, keyed by PCI device + subsystem ids. */
struct intel_quirk {
    int device;              /* PCI device id of the GPU */
    int subsystem_vendor;    /* PCI subsystem vendor id, or PCI_ANY_ID */
    int subsystem_device;    /* PCI subsystem device id, or PCI_ANY_ID */
    void (*hook)(struct drm_device *dev);  /* applied when all ids match */
};
8105
 
8106
/* Table of known machines needing workarounds; scanned by intel_init_quirks(). */
struct intel_quirk intel_quirks[] = {
    /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
    { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
    /* HP Mini needs pipe A force quirk (LP: #322104) */
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },

    /* Thinkpad R31 needs pipe A force quirk */
    { 0x3577, 0x1014, 0x0505, quirk_pipea_force },
    /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
    { 0x2592, 0x1179, 0x0001, quirk_pipea_force },

    /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
    { 0x3577,  0x1014, 0x0513, quirk_pipea_force },
    /* ThinkPad X40 needs pipe A force quirk */

    /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
    { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

    /* 855 & before need to leave pipe A & dpll A up */
    { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
    { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

    /* Lenovo U160 cannot use SSC on LVDS */
    { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

    /* Sony Vaio Y cannot use SSC on LVDS */
    { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
};
8134
 
8135
static void intel_init_quirks(struct drm_device *dev)
8136
{
8137
    struct pci_dev *d = dev->pdev;
8138
    int i;
8139
 
8140
    for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
8141
        struct intel_quirk *q = &intel_quirks[i];
8142
 
8143
        if (d->device == q->device &&
8144
            (d->subsystem_vendor == q->subsystem_vendor ||
8145
             q->subsystem_vendor == PCI_ANY_ID) &&
8146
            (d->subsystem_device == q->subsystem_device ||
8147
             q->subsystem_device == PCI_ANY_ID))
8148
            q->hook(dev);
8149
    }
8150
}
8151
 
2330 Serge 8152
/* Disable the VGA plane that we never use */
8153
/*
 * Disable the legacy VGA display plane, which the driver never uses.
 * Sets bit 5 ("screen off") in VGA sequencer register SR01 via port I/O,
 * waits for the sequencer to settle, then disables the VGA plane through
 * the platform's VGACNTRL register.
 */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	u32 vga_reg;

	/* PCH platforms use the CPU-side VGA control register. */
	if (HAS_PCH_SPLIT(dev))
		vga_reg = CPU_VGACNTRL;
	else
		vga_reg = VGACNTRL;

//	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
    /* Select SR01, read-modify-write it with the screen-off bit. */
    out8(VGA_SR_INDEX, 1);
    sr1 = in8(VGA_SR_DATA);
    out8(VGA_SR_DATA,sr1 | 1<<5);
//   vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}
8174
 
2327 Serge 8175
/*
 * Top-level modeset initialization: set up DRM mode config limits, apply
 * machine quirks, install chipset display hooks, create CRTCs and planes,
 * disable the legacy VGA plane, probe outputs, and enable clock gating
 * plus the power/frequency features for the detected generation.
 * The ordering below matters: quirks and display hooks must be installed
 * before CRTC init and clock gating run.
 */
void intel_modeset_init(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
	int i, ret;

    drm_mode_config_init(dev);

    dev->mode_config.min_width = 0;
    dev->mode_config.min_height = 0;

    dev->mode_config.funcs = (void *)&intel_mode_funcs;

    intel_init_quirks(dev);

    intel_init_display(dev);

    /* Maximum framebuffer dimensions grow with the hardware generation. */
    if (IS_GEN2(dev)) {
        dev->mode_config.max_width = 2048;
        dev->mode_config.max_height = 2048;
    } else if (IS_GEN3(dev)) {
        dev->mode_config.max_width = 4096;
        dev->mode_config.max_height = 4096;
    } else {
        dev->mode_config.max_width = 8192;
        dev->mode_config.max_height = 8192;
    }
    /* NOTE(port): framebuffer base comes from the platform helper. */
    dev->mode_config.fb_base = get_bus_addr();

    DRM_DEBUG_KMS("%d display pipe%s available.\n",
              dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");

    for (i = 0; i < dev_priv->num_pipe; i++) {
        intel_crtc_init(dev, i);
		ret = intel_plane_init(dev, i);
		if (ret)
			DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
    }

    /* Just disable it once at startup */
    i915_disable_vga(dev);
    intel_setup_outputs(dev);

    intel_init_clock_gating(dev);

    if (IS_IRONLAKE_M(dev)) {
        ironlake_enable_drps(dev);
        intel_init_emon(dev);
    }

    if (IS_GEN6(dev) || IS_GEN7(dev)) {
        gen6_enable_rps(dev_priv);
        gen6_update_ring_freq(dev_priv);
    }

    /* NOTE(port): GPU idle work/timer are disabled in this port. */
//   INIT_WORK(&dev_priv->idle_work, intel_idle_update);
//   setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
//           (unsigned long)dev);
}
2327 Serge 8233
 
2332 Serge 8234
/*
 * GEM-dependent part of modeset init, run after GEM is up: enables RC6 on
 * Ironlake mobile.  NOTE(port): overlay setup is disabled in this port.
 */
void intel_modeset_gem_init(struct drm_device *dev)
{
	if (IS_IRONLAKE_M(dev))
		ironlake_enable_rc6(dev);

//	intel_setup_overlay(dev);
}
8241
 
8242
 
2330 Serge 8243
/*
8244
 * Return which encoder is currently attached for connector.
8245
 */
8246
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
8247
{
8248
	return &intel_attached_encoder(connector)->base;
2327 Serge 8249
}
8250
 
2330 Serge 8251
void intel_connector_attach_encoder(struct intel_connector *connector,
8252
				    struct intel_encoder *encoder)
8253
{
8254
	connector->encoder = encoder;
8255
	drm_mode_connector_attach_encoder(&connector->base,
8256
					  &encoder->base);
8257
}
2327 Serge 8258
 
2330 Serge 8259