Subversion Repositories Kolibri OS

Rev Author Line No. Line
2327 Serge 1
/*
2
 * Copyright © 2006-2007 Intel Corporation
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21
 * DEALINGS IN THE SOFTWARE.
22
 *
23
 * Authors:
24
 *  Eric Anholt <eric@anholt.net>
25
 */
26
 
27
//#include 
28
#include 
29
//#include 
30
#include 
31
#include 
2330 Serge 32
#include 
2327 Serge 33
//#include 
2342 Serge 34
#include 
3031 serge 35
#include 
2327 Serge 36
#include "intel_drv.h"
3031 serge 37
#include 
2327 Serge 38
#include "i915_drv.h"
2351 Serge 39
#include "i915_trace.h"
3031 serge 40
#include 
41
#include 
42
//#include 
2327 Serge 43
 
44
phys_addr_t get_bus_addr(void);
45
 
46
static inline __attribute__((const))
47
bool is_power_of_2(unsigned long n)
48
{
49
    return (n != 0 && ((n & (n - 1)) == 0));
50
}
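/*
 * Illustrative example for the helper above: clearing the lowest set bit of
 * a power of two yields zero, so is_power_of_2(64) is true (64 & 63 == 0)
 * while is_power_of_2(12) is false (12 & 11 == 8); the n != 0 test keeps
 * is_power_of_2(0) false.
 */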
51
 
2330 Serge 52
#define MAX_ERRNO       4095
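/*
 * Descriptive note: 4095 mirrors the Linux kernel's MAX_ERRNO from
 * <linux/err.h>, where pointer values in the last 4095 bytes of the address
 * space are treated as encoded error numbers by the IS_ERR()/ERR_PTR()
 * convention.
 */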
53
 
54
 
55
 
2342 Serge 56
bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
2327 Serge 57
static void intel_increase_pllclock(struct drm_crtc *crtc);
3243 Serge 58
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
2327 Serge 59
 
60
typedef struct {
61
    /* given values */
62
    int n;
63
    int m1, m2;
64
    int p1, p2;
65
    /* derived values */
66
    int dot;
67
    int vco;
68
    int m;
69
    int p;
70
} intel_clock_t;
71
 
72
typedef struct {
73
    int min, max;
74
} intel_range_t;
75
 
76
typedef struct {
77
    int dot_limit;
78
    int p2_slow, p2_fast;
79
} intel_p2_t;
80
 
81
#define INTEL_P2_NUM              2
82
typedef struct intel_limit intel_limit_t;
83
struct intel_limit {
84
    intel_range_t   dot, vco, n, m, m1, m2, p, p1;
85
    intel_p2_t      p2;
86
    bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
3031 serge 87
			int, int, intel_clock_t *, intel_clock_t *);
2327 Serge 88
};
89
 
90
/* FDI */
91
#define IRONLAKE_FDI_FREQ       2700000 /* in kHz for mode->clock */
92
 
3243 Serge 93
int
94
intel_pch_rawclk(struct drm_device *dev)
95
{
96
	struct drm_i915_private *dev_priv = dev->dev_private;
97
 
98
	WARN_ON(!HAS_PCH_SPLIT(dev));
99
 
100
	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
101
}
102
 
2327 Serge 103
static bool
104
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
3031 serge 105
		    int target, int refclk, intel_clock_t *match_clock,
106
		    intel_clock_t *best_clock);
2327 Serge 107
static bool
108
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
3031 serge 109
			int target, int refclk, intel_clock_t *match_clock,
110
			intel_clock_t *best_clock);
2327 Serge 111
 
112
static bool
113
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
3031 serge 114
		      int target, int refclk, intel_clock_t *match_clock,
115
		      intel_clock_t *best_clock);
2327 Serge 116
static bool
117
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
3031 serge 118
			   int target, int refclk, intel_clock_t *match_clock,
119
			   intel_clock_t *best_clock);
2327 Serge 120
 
3031 serge 121
static bool
122
intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
123
			int target, int refclk, intel_clock_t *match_clock,
124
			intel_clock_t *best_clock);
125
 
2327 Serge 126
static inline u32 /* units of 100MHz */
127
intel_fdi_link_freq(struct drm_device *dev)
128
{
129
	if (IS_GEN5(dev)) {
130
		struct drm_i915_private *dev_priv = dev->dev_private;
131
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
132
	} else
133
		return 27;
134
}
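/*
 * Worked example for the helper above: the return value is in units of
 * 100 MHz, so the non-GEN5 fallback of 27 corresponds to a 2.7 GHz FDI
 * link, consistent with IRONLAKE_FDI_FREQ (2700000, in kHz) defined above.
 */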
135
 
136
static const intel_limit_t intel_limits_i8xx_dvo = {
137
        .dot = { .min = 25000, .max = 350000 },
138
        .vco = { .min = 930000, .max = 1400000 },
139
        .n = { .min = 3, .max = 16 },
140
        .m = { .min = 96, .max = 140 },
141
        .m1 = { .min = 18, .max = 26 },
142
        .m2 = { .min = 6, .max = 16 },
143
        .p = { .min = 4, .max = 128 },
144
        .p1 = { .min = 2, .max = 33 },
145
	.p2 = { .dot_limit = 165000,
146
		.p2_slow = 4, .p2_fast = 2 },
147
	.find_pll = intel_find_best_PLL,
148
};
149
 
150
static const intel_limit_t intel_limits_i8xx_lvds = {
151
        .dot = { .min = 25000, .max = 350000 },
152
        .vco = { .min = 930000, .max = 1400000 },
153
        .n = { .min = 3, .max = 16 },
154
        .m = { .min = 96, .max = 140 },
155
        .m1 = { .min = 18, .max = 26 },
156
        .m2 = { .min = 6, .max = 16 },
157
        .p = { .min = 4, .max = 128 },
158
        .p1 = { .min = 1, .max = 6 },
159
	.p2 = { .dot_limit = 165000,
160
		.p2_slow = 14, .p2_fast = 7 },
161
	.find_pll = intel_find_best_PLL,
162
};
163
 
164
static const intel_limit_t intel_limits_i9xx_sdvo = {
165
        .dot = { .min = 20000, .max = 400000 },
166
        .vco = { .min = 1400000, .max = 2800000 },
167
        .n = { .min = 1, .max = 6 },
168
        .m = { .min = 70, .max = 120 },
169
        .m1 = { .min = 10, .max = 22 },
170
        .m2 = { .min = 5, .max = 9 },
171
        .p = { .min = 5, .max = 80 },
172
        .p1 = { .min = 1, .max = 8 },
173
	.p2 = { .dot_limit = 200000,
174
		.p2_slow = 10, .p2_fast = 5 },
175
	.find_pll = intel_find_best_PLL,
176
};
177
 
178
static const intel_limit_t intel_limits_i9xx_lvds = {
179
        .dot = { .min = 20000, .max = 400000 },
180
        .vco = { .min = 1400000, .max = 2800000 },
181
        .n = { .min = 1, .max = 6 },
182
        .m = { .min = 70, .max = 120 },
183
        .m1 = { .min = 10, .max = 22 },
184
        .m2 = { .min = 5, .max = 9 },
185
        .p = { .min = 7, .max = 98 },
186
        .p1 = { .min = 1, .max = 8 },
187
	.p2 = { .dot_limit = 112000,
188
		.p2_slow = 14, .p2_fast = 7 },
189
	.find_pll = intel_find_best_PLL,
190
};
191
 
192
 
193
static const intel_limit_t intel_limits_g4x_sdvo = {
194
	.dot = { .min = 25000, .max = 270000 },
195
	.vco = { .min = 1750000, .max = 3500000},
196
	.n = { .min = 1, .max = 4 },
197
	.m = { .min = 104, .max = 138 },
198
	.m1 = { .min = 17, .max = 23 },
199
	.m2 = { .min = 5, .max = 11 },
200
	.p = { .min = 10, .max = 30 },
201
	.p1 = { .min = 1, .max = 3},
202
	.p2 = { .dot_limit = 270000,
203
		.p2_slow = 10,
204
		.p2_fast = 10
205
	},
206
	.find_pll = intel_g4x_find_best_PLL,
207
};
208
 
209
static const intel_limit_t intel_limits_g4x_hdmi = {
210
	.dot = { .min = 22000, .max = 400000 },
211
	.vco = { .min = 1750000, .max = 3500000},
212
	.n = { .min = 1, .max = 4 },
213
	.m = { .min = 104, .max = 138 },
214
	.m1 = { .min = 16, .max = 23 },
215
	.m2 = { .min = 5, .max = 11 },
216
	.p = { .min = 5, .max = 80 },
217
	.p1 = { .min = 1, .max = 8},
218
	.p2 = { .dot_limit = 165000,
219
		.p2_slow = 10, .p2_fast = 5 },
220
	.find_pll = intel_g4x_find_best_PLL,
221
};
222
 
223
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
224
	.dot = { .min = 20000, .max = 115000 },
225
	.vco = { .min = 1750000, .max = 3500000 },
226
	.n = { .min = 1, .max = 3 },
227
	.m = { .min = 104, .max = 138 },
228
	.m1 = { .min = 17, .max = 23 },
229
	.m2 = { .min = 5, .max = 11 },
230
	.p = { .min = 28, .max = 112 },
231
	.p1 = { .min = 2, .max = 8 },
232
	.p2 = { .dot_limit = 0,
233
		.p2_slow = 14, .p2_fast = 14
234
	},
235
	.find_pll = intel_g4x_find_best_PLL,
236
};
237
 
238
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
239
	.dot = { .min = 80000, .max = 224000 },
240
	.vco = { .min = 1750000, .max = 3500000 },
241
	.n = { .min = 1, .max = 3 },
242
	.m = { .min = 104, .max = 138 },
243
	.m1 = { .min = 17, .max = 23 },
244
	.m2 = { .min = 5, .max = 11 },
245
	.p = { .min = 14, .max = 42 },
246
	.p1 = { .min = 2, .max = 6 },
247
	.p2 = { .dot_limit = 0,
248
		.p2_slow = 7, .p2_fast = 7
249
	},
250
	.find_pll = intel_g4x_find_best_PLL,
251
};
252
 
253
static const intel_limit_t intel_limits_g4x_display_port = {
254
        .dot = { .min = 161670, .max = 227000 },
255
        .vco = { .min = 1750000, .max = 3500000},
256
        .n = { .min = 1, .max = 2 },
257
        .m = { .min = 97, .max = 108 },
258
        .m1 = { .min = 0x10, .max = 0x12 },
259
        .m2 = { .min = 0x05, .max = 0x06 },
260
        .p = { .min = 10, .max = 20 },
261
        .p1 = { .min = 1, .max = 2},
262
        .p2 = { .dot_limit = 0,
263
		.p2_slow = 10, .p2_fast = 10 },
264
        .find_pll = intel_find_pll_g4x_dp,
265
};
266
 
267
static const intel_limit_t intel_limits_pineview_sdvo = {
268
        .dot = { .min = 20000, .max = 400000},
269
        .vco = { .min = 1700000, .max = 3500000 },
270
	/* Pineview's Ncounter is a ring counter */
271
        .n = { .min = 3, .max = 6 },
272
        .m = { .min = 2, .max = 256 },
273
	/* Pineview only has one combined m divider, which we treat as m2. */
274
        .m1 = { .min = 0, .max = 0 },
275
        .m2 = { .min = 0, .max = 254 },
276
        .p = { .min = 5, .max = 80 },
277
        .p1 = { .min = 1, .max = 8 },
278
	.p2 = { .dot_limit = 200000,
279
		.p2_slow = 10, .p2_fast = 5 },
280
	.find_pll = intel_find_best_PLL,
281
};
282
 
283
static const intel_limit_t intel_limits_pineview_lvds = {
284
        .dot = { .min = 20000, .max = 400000 },
285
        .vco = { .min = 1700000, .max = 3500000 },
286
        .n = { .min = 3, .max = 6 },
287
        .m = { .min = 2, .max = 256 },
288
        .m1 = { .min = 0, .max = 0 },
289
        .m2 = { .min = 0, .max = 254 },
290
        .p = { .min = 7, .max = 112 },
291
        .p1 = { .min = 1, .max = 8 },
292
	.p2 = { .dot_limit = 112000,
293
		.p2_slow = 14, .p2_fast = 14 },
294
	.find_pll = intel_find_best_PLL,
295
};
296
 
297
/* Ironlake / Sandybridge
298
 *
299
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
300
 * the range value for them is (actual_value - 2).
301
 */
302
static const intel_limit_t intel_limits_ironlake_dac = {
303
	.dot = { .min = 25000, .max = 350000 },
304
	.vco = { .min = 1760000, .max = 3510000 },
305
	.n = { .min = 1, .max = 5 },
306
	.m = { .min = 79, .max = 127 },
307
	.m1 = { .min = 12, .max = 22 },
308
	.m2 = { .min = 5, .max = 9 },
309
	.p = { .min = 5, .max = 80 },
310
	.p1 = { .min = 1, .max = 8 },
311
	.p2 = { .dot_limit = 225000,
312
		.p2_slow = 10, .p2_fast = 5 },
313
	.find_pll = intel_g4x_find_best_PLL,
314
};
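/*
 * Illustrative reading of the table above, following the comment that
 * precedes it: .n, .m1 and .m2 hold register values and the hardware
 * divider is (value + 2), so .n = { .min = 1, .max = 5 } describes actual
 * N dividers of 3..7, which is why intel_clock() below divides the
 * reference clock by (clock->n + 2).
 */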
315
 
316
static const intel_limit_t intel_limits_ironlake_single_lvds = {
317
	.dot = { .min = 25000, .max = 350000 },
318
	.vco = { .min = 1760000, .max = 3510000 },
319
	.n = { .min = 1, .max = 3 },
320
	.m = { .min = 79, .max = 118 },
321
	.m1 = { .min = 12, .max = 22 },
322
	.m2 = { .min = 5, .max = 9 },
323
	.p = { .min = 28, .max = 112 },
324
	.p1 = { .min = 2, .max = 8 },
325
	.p2 = { .dot_limit = 225000,
326
		.p2_slow = 14, .p2_fast = 14 },
327
	.find_pll = intel_g4x_find_best_PLL,
328
};
329
 
330
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
331
	.dot = { .min = 25000, .max = 350000 },
332
	.vco = { .min = 1760000, .max = 3510000 },
333
	.n = { .min = 1, .max = 3 },
334
	.m = { .min = 79, .max = 127 },
335
	.m1 = { .min = 12, .max = 22 },
336
	.m2 = { .min = 5, .max = 9 },
337
	.p = { .min = 14, .max = 56 },
338
	.p1 = { .min = 2, .max = 8 },
339
	.p2 = { .dot_limit = 225000,
340
		.p2_slow = 7, .p2_fast = 7 },
341
	.find_pll = intel_g4x_find_best_PLL,
342
};
343
 
344
/* LVDS 100MHz refclk limits. */
345
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
346
	.dot = { .min = 25000, .max = 350000 },
347
	.vco = { .min = 1760000, .max = 3510000 },
348
	.n = { .min = 1, .max = 2 },
349
	.m = { .min = 79, .max = 126 },
350
	.m1 = { .min = 12, .max = 22 },
351
	.m2 = { .min = 5, .max = 9 },
352
	.p = { .min = 28, .max = 112 },
2342 Serge 353
	.p1 = { .min = 2, .max = 8 },
2327 Serge 354
	.p2 = { .dot_limit = 225000,
355
		.p2_slow = 14, .p2_fast = 14 },
356
	.find_pll = intel_g4x_find_best_PLL,
357
};
358
 
359
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
360
	.dot = { .min = 25000, .max = 350000 },
361
	.vco = { .min = 1760000, .max = 3510000 },
362
	.n = { .min = 1, .max = 3 },
363
	.m = { .min = 79, .max = 126 },
364
	.m1 = { .min = 12, .max = 22 },
365
	.m2 = { .min = 5, .max = 9 },
366
	.p = { .min = 14, .max = 42 },
2342 Serge 367
	.p1 = { .min = 2, .max = 6 },
2327 Serge 368
	.p2 = { .dot_limit = 225000,
369
		.p2_slow = 7, .p2_fast = 7 },
370
	.find_pll = intel_g4x_find_best_PLL,
371
};
372
 
373
static const intel_limit_t intel_limits_ironlake_display_port = {
374
        .dot = { .min = 25000, .max = 350000 },
375
        .vco = { .min = 1760000, .max = 3510000},
376
        .n = { .min = 1, .max = 2 },
377
        .m = { .min = 81, .max = 90 },
378
        .m1 = { .min = 12, .max = 22 },
379
        .m2 = { .min = 5, .max = 9 },
380
        .p = { .min = 10, .max = 20 },
381
        .p1 = { .min = 1, .max = 2},
382
        .p2 = { .dot_limit = 0,
383
		.p2_slow = 10, .p2_fast = 10 },
384
        .find_pll = intel_find_pll_ironlake_dp,
385
};
386
 
3031 serge 387
static const intel_limit_t intel_limits_vlv_dac = {
388
	.dot = { .min = 25000, .max = 270000 },
389
	.vco = { .min = 4000000, .max = 6000000 },
390
	.n = { .min = 1, .max = 7 },
391
	.m = { .min = 22, .max = 450 }, /* guess */
392
	.m1 = { .min = 2, .max = 3 },
393
	.m2 = { .min = 11, .max = 156 },
394
	.p = { .min = 10, .max = 30 },
395
	.p1 = { .min = 2, .max = 3 },
396
	.p2 = { .dot_limit = 270000,
397
		.p2_slow = 2, .p2_fast = 20 },
398
	.find_pll = intel_vlv_find_best_pll,
399
};
400
 
401
static const intel_limit_t intel_limits_vlv_hdmi = {
402
	.dot = { .min = 20000, .max = 165000 },
3243 Serge 403
	.vco = { .min = 4000000, .max = 5994000},
3031 serge 404
	.n = { .min = 1, .max = 7 },
405
	.m = { .min = 60, .max = 300 }, /* guess */
406
	.m1 = { .min = 2, .max = 3 },
407
	.m2 = { .min = 11, .max = 156 },
408
	.p = { .min = 10, .max = 30 },
409
	.p1 = { .min = 2, .max = 3 },
410
	.p2 = { .dot_limit = 270000,
411
		.p2_slow = 2, .p2_fast = 20 },
412
	.find_pll = intel_vlv_find_best_pll,
413
};
414
 
415
static const intel_limit_t intel_limits_vlv_dp = {
3243 Serge 416
	.dot = { .min = 25000, .max = 270000 },
417
	.vco = { .min = 4000000, .max = 6000000 },
3031 serge 418
	.n = { .min = 1, .max = 7 },
3243 Serge 419
	.m = { .min = 22, .max = 450 },
3031 serge 420
	.m1 = { .min = 2, .max = 3 },
421
	.m2 = { .min = 11, .max = 156 },
422
	.p = { .min = 10, .max = 30 },
423
	.p1 = { .min = 2, .max = 3 },
424
	.p2 = { .dot_limit = 270000,
425
		.p2_slow = 2, .p2_fast = 20 },
426
	.find_pll = intel_vlv_find_best_pll,
427
};
428
 
429
u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
430
{
431
	unsigned long flags;
432
	u32 val = 0;
433
 
434
	spin_lock_irqsave(&dev_priv->dpio_lock, flags);
435
	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
436
		DRM_ERROR("DPIO idle wait timed out\n");
437
		goto out_unlock;
438
	}
439
 
440
	I915_WRITE(DPIO_REG, reg);
441
	I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
442
		   DPIO_BYTE);
443
	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
444
		DRM_ERROR("DPIO read wait timed out\n");
445
		goto out_unlock;
446
	}
447
	val = I915_READ(DPIO_DATA);
448
 
449
out_unlock:
450
	spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
451
	return val;
452
}
453
 
454
static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
455
			     u32 val)
456
{
457
	unsigned long flags;
458
 
459
	spin_lock_irqsave(&dev_priv->dpio_lock, flags);
460
	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
461
		DRM_ERROR("DPIO idle wait timed out\n");
462
		goto out_unlock;
463
	}
464
 
465
	I915_WRITE(DPIO_DATA, val);
466
	I915_WRITE(DPIO_REG, reg);
467
	I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID |
468
		   DPIO_BYTE);
469
	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
470
		DRM_ERROR("DPIO write wait timed out\n");
471
 
472
out_unlock:
473
       spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
474
}
475
 
476
static void vlv_init_dpio(struct drm_device *dev)
477
{
478
	struct drm_i915_private *dev_priv = dev->dev_private;
479
 
480
	/* Reset the DPIO config */
481
	I915_WRITE(DPIO_CTL, 0);
482
	POSTING_READ(DPIO_CTL);
483
	I915_WRITE(DPIO_CTL, 1);
484
	POSTING_READ(DPIO_CTL);
485
}
486
 
487
static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
488
{
489
	DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
490
	return 1;
491
}
492
 
493
static const struct dmi_system_id intel_dual_link_lvds[] = {
494
	{
495
		.callback = intel_dual_link_lvds_callback,
496
		.ident = "Apple MacBook Pro (Core i5/i7 Series)",
497
		.matches = {
498
			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
499
			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
500
		},
501
	},
502
	{ }	/* terminating entry */
503
};
504
 
505
static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
506
			      unsigned int reg)
507
{
508
	unsigned int val;
509
 
510
	/* use the module option value if specified */
511
	if (i915_lvds_channel_mode > 0)
512
		return i915_lvds_channel_mode == 2;
513
 
514
//   if (dmi_check_system(intel_dual_link_lvds))
515
//       return true;
516
 
517
	if (dev_priv->lvds_val)
518
		val = dev_priv->lvds_val;
519
	else {
520
		/* BIOS should set the proper LVDS register value at boot, but
521
		 * in reality, it doesn't set the value when the lid is closed;
522
		 * we need to check "the value to be set" in VBT when LVDS
523
		 * register is uninitialized.
524
		 */
525
		val = I915_READ(reg);
526
		if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
527
			val = dev_priv->bios_lvds_val;
528
		dev_priv->lvds_val = val;
529
	}
530
	return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
531
}
532
 
2327 Serge 533
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
534
						int refclk)
535
{
536
	struct drm_device *dev = crtc->dev;
537
	struct drm_i915_private *dev_priv = dev->dev_private;
538
	const intel_limit_t *limit;
539
 
540
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
3031 serge 541
		if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
2327 Serge 542
			/* LVDS dual channel */
543
			if (refclk == 100000)
544
				limit = &intel_limits_ironlake_dual_lvds_100m;
545
			else
546
				limit = &intel_limits_ironlake_dual_lvds;
547
		} else {
548
			if (refclk == 100000)
549
				limit = &intel_limits_ironlake_single_lvds_100m;
550
			else
551
				limit = &intel_limits_ironlake_single_lvds;
552
		}
553
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
3243 Serge 554
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
2327 Serge 555
		limit = &intel_limits_ironlake_display_port;
556
	else
557
		limit = &intel_limits_ironlake_dac;
558
 
559
	return limit;
560
}
561
 
562
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
563
{
564
	struct drm_device *dev = crtc->dev;
565
	struct drm_i915_private *dev_priv = dev->dev_private;
566
	const intel_limit_t *limit;
567
 
568
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
3031 serge 569
		if (is_dual_link_lvds(dev_priv, LVDS))
2327 Serge 570
			/* LVDS with dual channel */
571
			limit = &intel_limits_g4x_dual_channel_lvds;
572
		else
573
			/* LVDS with single channel */
574
			limit = &intel_limits_g4x_single_channel_lvds;
575
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
576
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
577
		limit = &intel_limits_g4x_hdmi;
578
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
579
		limit = &intel_limits_g4x_sdvo;
2342 Serge 580
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
2327 Serge 581
		limit = &intel_limits_g4x_display_port;
582
	} else /* The option is for other outputs */
583
		limit = &intel_limits_i9xx_sdvo;
584
 
585
	return limit;
586
}
587
 
588
static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
589
{
590
	struct drm_device *dev = crtc->dev;
591
	const intel_limit_t *limit;
592
 
593
	if (HAS_PCH_SPLIT(dev))
594
		limit = intel_ironlake_limit(crtc, refclk);
595
	else if (IS_G4X(dev)) {
596
		limit = intel_g4x_limit(crtc);
597
	} else if (IS_PINEVIEW(dev)) {
598
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
599
			limit = &intel_limits_pineview_lvds;
600
		else
601
			limit = &intel_limits_pineview_sdvo;
3031 serge 602
	} else if (IS_VALLEYVIEW(dev)) {
603
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG))
604
			limit = &intel_limits_vlv_dac;
605
		else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
606
			limit = &intel_limits_vlv_hdmi;
607
		else
608
			limit = &intel_limits_vlv_dp;
2327 Serge 609
	} else if (!IS_GEN2(dev)) {
610
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
611
			limit = &intel_limits_i9xx_lvds;
612
		else
613
			limit = &intel_limits_i9xx_sdvo;
614
	} else {
615
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
616
			limit = &intel_limits_i8xx_lvds;
617
		else
618
			limit = &intel_limits_i8xx_dvo;
619
	}
620
	return limit;
621
}
622
 
623
/* m1 is reserved as 0 in Pineview, n is a ring counter */
624
static void pineview_clock(int refclk, intel_clock_t *clock)
625
{
626
	clock->m = clock->m2 + 2;
627
	clock->p = clock->p1 * clock->p2;
628
	clock->vco = refclk * clock->m / clock->n;
629
	clock->dot = clock->vco / clock->p;
630
}
631
 
632
static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
633
{
634
	if (IS_PINEVIEW(dev)) {
635
		pineview_clock(refclk, clock);
636
		return;
637
	}
638
	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
639
	clock->p = clock->p1 * clock->p2;
640
	clock->vco = refclk * clock->m / (clock->n + 2);
641
	clock->dot = clock->vco / clock->p;
642
}
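/*
 * Worked example (values chosen here to satisfy intel_limits_i9xx_sdvo,
 * with refclk assumed to be 96000 kHz): m1 = 12, m2 = 8, n = 1, p1 = 2,
 * p2 = 10 gives m = 5 * (12 + 2) + (8 + 2) = 80, vco = 96000 * 80 / (1 + 2)
 * = 2560000 kHz, p = 20 and dot = 2560000 / 20 = 128000 kHz, all within the
 * .m, .vco, .p and .dot ranges of that table. Pineview differs: its single
 * combined divider means m = m2 + 2 and n is used directly, as in
 * pineview_clock() above.
 */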
643
 
644
/**
645
 * Returns whether any output on the specified pipe is of the specified type
646
 */
647
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
648
{
649
	struct drm_device *dev = crtc->dev;
650
	struct intel_encoder *encoder;
651
 
3031 serge 652
	for_each_encoder_on_crtc(dev, crtc, encoder)
653
		if (encoder->type == type)
2327 Serge 654
			return true;
655
 
656
	return false;
657
}
658
 
659
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
660
/**
661
 * Returns whether the given set of divisors are valid for a given refclk with
662
 * the given connectors.
663
 */
664
 
665
static bool intel_PLL_is_valid(struct drm_device *dev,
666
			       const intel_limit_t *limit,
667
			       const intel_clock_t *clock)
668
{
669
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
2342 Serge 670
		INTELPllInvalid("p1 out of range\n");
2327 Serge 671
	if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
2342 Serge 672
		INTELPllInvalid("p out of range\n");
2327 Serge 673
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
2342 Serge 674
		INTELPllInvalid("m2 out of range\n");
2327 Serge 675
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
2342 Serge 676
		INTELPllInvalid("m1 out of range\n");
2327 Serge 677
	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
2342 Serge 678
		INTELPllInvalid("m1 <= m2\n");
2327 Serge 679
	if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
2342 Serge 680
		INTELPllInvalid("m out of range\n");
2327 Serge 681
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
2342 Serge 682
		INTELPllInvalid("n out of range\n");
2327 Serge 683
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
2342 Serge 684
		INTELPllInvalid("vco out of range\n");
2327 Serge 685
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
686
	 * connector, etc., rather than just a single range.
687
	 */
688
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
2342 Serge 689
		INTELPllInvalid("dot out of range\n");
2327 Serge 690
 
691
	return true;
692
}
693
 
694
static bool
695
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
3031 serge 696
		    int target, int refclk, intel_clock_t *match_clock,
697
		    intel_clock_t *best_clock)
2327 Serge 698
 
699
{
700
	struct drm_device *dev = crtc->dev;
701
	struct drm_i915_private *dev_priv = dev->dev_private;
702
	intel_clock_t clock;
703
	int err = target;
704
 
705
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
706
	    (I915_READ(LVDS)) != 0) {
707
		/*
708
		 * For LVDS, if the panel is on, just rely on its current
709
		 * settings for dual-channel.  We haven't figured out how to
710
		 * reliably set up different single/dual channel state, if we
711
		 * even can.
712
		 */
3031 serge 713
		if (is_dual_link_lvds(dev_priv, LVDS))
2327 Serge 714
			clock.p2 = limit->p2.p2_fast;
715
		else
716
			clock.p2 = limit->p2.p2_slow;
717
	} else {
718
		if (target < limit->p2.dot_limit)
719
			clock.p2 = limit->p2.p2_slow;
720
		else
721
			clock.p2 = limit->p2.p2_fast;
722
	}
723
 
2342 Serge 724
	memset(best_clock, 0, sizeof(*best_clock));
2327 Serge 725
 
726
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
727
	     clock.m1++) {
728
		for (clock.m2 = limit->m2.min;
729
		     clock.m2 <= limit->m2.max; clock.m2++) {
730
			/* m1 is always 0 in Pineview */
731
			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
732
				break;
733
			for (clock.n = limit->n.min;
734
			     clock.n <= limit->n.max; clock.n++) {
735
				for (clock.p1 = limit->p1.min;
736
					clock.p1 <= limit->p1.max; clock.p1++) {
737
					int this_err;
738
 
739
					intel_clock(dev, refclk, &clock);
740
					if (!intel_PLL_is_valid(dev, limit,
741
								&clock))
742
						continue;
3031 serge 743
					if (match_clock &&
744
					    clock.p != match_clock->p)
745
						continue;
2327 Serge 746
 
747
					this_err = abs(clock.dot - target);
748
					if (this_err < err) {
749
						*best_clock = clock;
750
						err = this_err;
751
					}
752
				}
753
			}
754
		}
755
	}
756
 
757
	return (err != target);
758
}
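/*
 * Note on the return value above: err starts at the requested dot clock, so
 * (err != target) is true exactly when at least one divisor combination
 * passed the validity and match_clock checks with |dot - target| < target,
 * i.e. when *best_clock was actually written.
 */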
759
 
760
static bool
761
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
3031 serge 762
			int target, int refclk, intel_clock_t *match_clock,
763
			intel_clock_t *best_clock)
2327 Serge 764
{
765
	struct drm_device *dev = crtc->dev;
766
	struct drm_i915_private *dev_priv = dev->dev_private;
767
	intel_clock_t clock;
768
	int max_n;
769
	bool found;
770
	/* approximately equals target * 0.00585 */
771
	int err_most = (target >> 8) + (target >> 9);
772
	found = false;
773
 
774
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
775
		int lvds_reg;
776
 
777
		if (HAS_PCH_SPLIT(dev))
778
			lvds_reg = PCH_LVDS;
779
		else
780
			lvds_reg = LVDS;
781
		if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
782
		    LVDS_CLKB_POWER_UP)
783
			clock.p2 = limit->p2.p2_fast;
784
		else
785
			clock.p2 = limit->p2.p2_slow;
786
	} else {
787
		if (target < limit->p2.dot_limit)
788
			clock.p2 = limit->p2.p2_slow;
789
		else
790
			clock.p2 = limit->p2.p2_fast;
791
	}
792
 
793
	memset(best_clock, 0, sizeof(*best_clock));
794
	max_n = limit->n.max;
795
	/* based on hardware requirement, prefer smaller n to precision */
796
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
797
		/* based on hardware requirement, prefer larger m1,m2 */
798
		for (clock.m1 = limit->m1.max;
799
		     clock.m1 >= limit->m1.min; clock.m1--) {
800
			for (clock.m2 = limit->m2.max;
801
			     clock.m2 >= limit->m2.min; clock.m2--) {
802
				for (clock.p1 = limit->p1.max;
803
				     clock.p1 >= limit->p1.min; clock.p1--) {
804
					int this_err;
805
 
806
					intel_clock(dev, refclk, &clock);
807
					if (!intel_PLL_is_valid(dev, limit,
808
								&clock))
809
						continue;
3031 serge 810
					if (match_clock &&
811
					    clock.p != match_clock->p)
812
						continue;
2327 Serge 813
 
814
					this_err = abs(clock.dot - target);
815
					if (this_err < err_most) {
816
						*best_clock = clock;
817
						err_most = this_err;
818
						max_n = clock.n;
819
						found = true;
820
					}
821
				}
822
			}
823
		}
824
	}
825
	return found;
826
}
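/*
 * Worked arithmetic for the error bound used above: (target >> 8) +
 * (target >> 9) = target * (1/256 + 1/512) = target * 3/512, about 0.586%
 * of the target clock, matching the "approximately equals target * 0.00585"
 * comment.
 */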
827
 
828
static bool
829
intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
3031 serge 830
			   int target, int refclk, intel_clock_t *match_clock,
831
			   intel_clock_t *best_clock)
2327 Serge 832
{
833
	struct drm_device *dev = crtc->dev;
834
	intel_clock_t clock;
835
 
836
	if (target < 200000) {
837
		clock.n = 1;
838
		clock.p1 = 2;
839
		clock.p2 = 10;
840
		clock.m1 = 12;
841
		clock.m2 = 9;
842
	} else {
843
		clock.n = 2;
844
		clock.p1 = 1;
845
		clock.p2 = 10;
846
		clock.m1 = 14;
847
		clock.m2 = 8;
848
	}
849
	intel_clock(dev, refclk, &clock);
850
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
851
	return true;
852
}
853
 
854
/* DisplayPort has only two frequencies, 162MHz and 270MHz */
855
static bool
856
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
3031 serge 857
		      int target, int refclk, intel_clock_t *match_clock,
858
		      intel_clock_t *best_clock)
2327 Serge 859
{
860
	intel_clock_t clock;
861
	if (target < 200000) {
862
		clock.p1 = 2;
863
		clock.p2 = 10;
864
		clock.n = 2;
865
		clock.m1 = 23;
866
		clock.m2 = 8;
867
	} else {
868
		clock.p1 = 1;
869
		clock.p2 = 10;
870
		clock.n = 1;
871
		clock.m1 = 14;
872
		clock.m2 = 2;
873
	}
874
	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
875
	clock.p = (clock.p1 * clock.p2);
876
	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
877
	clock.vco = 0;
878
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
879
	return true;
880
}
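/*
 * Quick check of the fixed dividers above against the "162MHz and 270MHz"
 * comment: for target < 200000, dot = 96000 * (5*(23+2) + (8+2)) / (2+2) /
 * (2*10) = 96000 * 135 / 4 / 20 = 162000 kHz; otherwise dot = 96000 * 84 /
 * 3 / 10 = 268800 kHz, i.e. the two DisplayPort link rates (the latter
 * rounded to 270 MHz in the comment).
 */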
3031 serge 881
static bool
882
intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
883
			int target, int refclk, intel_clock_t *match_clock,
884
			intel_clock_t *best_clock)
885
{
886
	u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
887
	u32 m, n, fastclk;
888
	u32 updrate, minupdate, fracbits, p;
889
	unsigned long bestppm, ppm, absppm;
890
	int dotclk, flag;
2327 Serge 891
 
3031 serge 892
	flag = 0;
893
	dotclk = target * 1000;
894
	bestppm = 1000000;
895
	ppm = absppm = 0;
896
	fastclk = dotclk / (2*100);
897
	updrate = 0;
898
	minupdate = 19200;
899
	fracbits = 1;
900
	n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
901
	bestm1 = bestm2 = bestp1 = bestp2 = 0;
902
 
903
	/* based on hardware requirement, prefer smaller n to precision */
904
	for (n = limit->n.min; n <= ((refclk) / minupdate); n++) {
905
		updrate = refclk / n;
906
		for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) {
907
			for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) {
908
				if (p2 > 10)
909
					p2 = p2 - 1;
910
				p = p1 * p2;
911
				/* based on hardware requirement, prefer bigger m1,m2 values */
912
				for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) {
913
					m2 = (((2*(fastclk * p * n / m1 )) +
914
					       refclk) / (2*refclk));
915
					m = m1 * m2;
916
					vco = updrate * m;
917
					if (vco >= limit->vco.min && vco < limit->vco.max) {
918
						ppm = 1000000 * ((vco / p) - fastclk) / fastclk;
919
						absppm = (ppm > 0) ? ppm : (-ppm);
920
						if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) {
921
							bestppm = 0;
922
							flag = 1;
923
						}
924
						if (absppm < bestppm - 10) {
925
							bestppm = absppm;
926
							flag = 1;
927
						}
928
						if (flag) {
929
							bestn = n;
930
							bestm1 = m1;
931
							bestm2 = m2;
932
							bestp1 = p1;
933
							bestp2 = p2;
934
							flag = 0;
935
						}
936
					}
937
				}
938
			}
939
		}
940
	}
941
	best_clock->n = bestn;
942
	best_clock->m1 = bestm1;
943
	best_clock->m2 = bestm2;
944
	best_clock->p1 = bestp1;
945
	best_clock->p2 = bestp2;
946
 
947
	return true;
948
}
949
 
3243 Serge 950
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
951
					     enum pipe pipe)
952
{
953
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
954
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
955
 
956
	return intel_crtc->cpu_transcoder;
957
}
958
 
3031 serge 959
static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
960
{
961
	struct drm_i915_private *dev_priv = dev->dev_private;
962
	u32 frame, frame_reg = PIPEFRAME(pipe);
963
 
964
	frame = I915_READ(frame_reg);
965
 
966
	if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
967
		DRM_DEBUG_KMS("vblank wait timed out\n");
968
}
969
 
2327 Serge 970
/**
971
 * intel_wait_for_vblank - wait for vblank on a given pipe
972
 * @dev: drm device
973
 * @pipe: pipe to wait for
974
 *
975
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
976
 * mode setting code.
977
 */
978
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
979
{
980
	struct drm_i915_private *dev_priv = dev->dev_private;
981
	int pipestat_reg = PIPESTAT(pipe);
982
 
3031 serge 983
	if (INTEL_INFO(dev)->gen >= 5) {
984
		ironlake_wait_for_vblank(dev, pipe);
985
		return;
986
	}
987
 
2327 Serge 988
	/* Clear existing vblank status. Note this will clear any other
989
	 * sticky status fields as well.
990
	 *
991
	 * This races with i915_driver_irq_handler() with the result
992
	 * that either function could miss a vblank event.  Here it is not
993
	 * fatal, as we will either wait upon the next vblank interrupt or
994
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
995
	 * called during modeset at which time the GPU should be idle and
996
	 * should *not* be performing page flips and thus not waiting on
997
	 * vblanks...
998
	 * Currently, the result of us stealing a vblank from the irq
999
	 * handler is that a single frame will be skipped during swapbuffers.
1000
	 */
1001
	I915_WRITE(pipestat_reg,
1002
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
1003
 
1004
	/* Wait for vblank interrupt bit to set */
1005
	if (wait_for(I915_READ(pipestat_reg) &
1006
		     PIPE_VBLANK_INTERRUPT_STATUS,
1007
		     50))
1008
		DRM_DEBUG_KMS("vblank wait timed out\n");
1009
}
1010
 
1011
/*
1012
 * intel_wait_for_pipe_off - wait for pipe to turn off
1013
 * @dev: drm device
1014
 * @pipe: pipe to wait for
1015
 *
1016
 * After disabling a pipe, we can't wait for vblank in the usual way,
1017
 * spinning on the vblank interrupt status bit, since we won't actually
1018
 * see an interrupt when the pipe is disabled.
1019
 *
1020
 * On Gen4 and above:
1021
 *   wait for the pipe register state bit to turn off
1022
 *
1023
 * Otherwise:
1024
 *   wait for the display line value to settle (it usually
1025
 *   ends up stopping at the start of the next frame).
1026
 *
1027
 */
1028
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
1029
{
1030
	struct drm_i915_private *dev_priv = dev->dev_private;
3243 Serge 1031
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1032
								      pipe);
2327 Serge 1033
 
1034
	if (INTEL_INFO(dev)->gen >= 4) {
3243 Serge 1035
		int reg = PIPECONF(cpu_transcoder);
2327 Serge 1036
 
1037
		/* Wait for the Pipe State to go off */
1038
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
1039
			     100))
3031 serge 1040
			WARN(1, "pipe_off wait timed out\n");
2327 Serge 1041
	} else {
3031 serge 1042
		u32 last_line, line_mask;
2327 Serge 1043
		int reg = PIPEDSL(pipe);
3031 serge 1044
        unsigned long timeout = GetTimerTicks() + msecs_to_jiffies(100);
2327 Serge 1045
 
3031 serge 1046
		if (IS_GEN2(dev))
1047
			line_mask = DSL_LINEMASK_GEN2;
1048
		else
1049
			line_mask = DSL_LINEMASK_GEN3;
1050
 
2327 Serge 1051
		/* Wait for the display line to settle */
1052
		do {
3031 serge 1053
			last_line = I915_READ(reg) & line_mask;
2327 Serge 1054
			mdelay(5);
3031 serge 1055
		} while (((I915_READ(reg) & line_mask) != last_line) &&
1056
			 time_after(timeout, GetTimerTicks()));
1057
		if (time_after(GetTimerTicks(), timeout))
1058
			WARN(1, "pipe_off wait timed out\n");
2327 Serge 1059
	}
1060
}
1061
 
1062
static const char *state_string(bool enabled)
1063
{
1064
	return enabled ? "on" : "off";
1065
}
1066
 
1067
/* Only for pre-ILK configs */
1068
static void assert_pll(struct drm_i915_private *dev_priv,
1069
		       enum pipe pipe, bool state)
1070
{
1071
	int reg;
1072
	u32 val;
1073
	bool cur_state;
1074
 
1075
	reg = DPLL(pipe);
1076
	val = I915_READ(reg);
1077
	cur_state = !!(val & DPLL_VCO_ENABLE);
1078
	WARN(cur_state != state,
1079
	     "PLL state assertion failure (expected %s, current %s)\n",
1080
	     state_string(state), state_string(cur_state));
1081
}
1082
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
1083
#define assert_pll_disabled(d, p) assert_pll(d, p, false)
1084
 
1085
/* For ILK+ */
1086
static void assert_pch_pll(struct drm_i915_private *dev_priv,
3031 serge 1087
			   struct intel_pch_pll *pll,
1088
			   struct intel_crtc *crtc,
1089
			   bool state)
2327 Serge 1090
{
1091
	u32 val;
1092
	bool cur_state;
1093
 
3031 serge 1094
	if (HAS_PCH_LPT(dev_priv->dev)) {
1095
		DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
1096
		return;
1097
	}
2342 Serge 1098
 
3031 serge 1099
	if (WARN (!pll,
1100
		  "asserting PCH PLL %s with no PLL\n", state_string(state)))
1101
		return;
2342 Serge 1102
 
3031 serge 1103
	val = I915_READ(pll->pll_reg);
1104
	cur_state = !!(val & DPLL_VCO_ENABLE);
1105
	WARN(cur_state != state,
1106
	     "PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n",
1107
	     pll->pll_reg, state_string(state), state_string(cur_state), val);
2342 Serge 1108
 
3031 serge 1109
	/* Make sure the selected PLL is correctly attached to the transcoder */
1110
	if (crtc && HAS_PCH_CPT(dev_priv->dev)) {
1111
		u32 pch_dpll;
2342 Serge 1112
 
3031 serge 1113
		pch_dpll = I915_READ(PCH_DPLL_SEL);
1114
		cur_state = pll->pll_reg == _PCH_DPLL_B;
1115
		if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state,
1116
			  "PLL[%d] not attached to this transcoder %d: %08x\n",
1117
			  cur_state, crtc->pipe, pch_dpll)) {
1118
			cur_state = !!(val >> (4*crtc->pipe + 3));
2327 Serge 1119
	WARN(cur_state != state,
3031 serge 1120
			     "PLL[%d] not %s on this transcoder %d: %08x\n",
1121
			     pll->pll_reg == _PCH_DPLL_B,
1122
			     state_string(state),
1123
			     crtc->pipe,
1124
			     val);
1125
		}
1126
	}
2327 Serge 1127
}
3031 serge 1128
#define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true)
1129
#define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false)
2327 Serge 1130
 
1131
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1132
			  enum pipe pipe, bool state)
1133
{
1134
	int reg;
1135
	u32 val;
1136
	bool cur_state;
3243 Serge 1137
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1138
								      pipe);
2327 Serge 1139
 
3031 serge 1140
	if (IS_HASWELL(dev_priv->dev)) {
1141
		/* On Haswell, DDI is used instead of FDI_TX_CTL */
3243 Serge 1142
		reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
3031 serge 1143
		val = I915_READ(reg);
3243 Serge 1144
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
3031 serge 1145
	} else {
2327 Serge 1146
	reg = FDI_TX_CTL(pipe);
1147
	val = I915_READ(reg);
1148
	cur_state = !!(val & FDI_TX_ENABLE);
3031 serge 1149
	}
2327 Serge 1150
	WARN(cur_state != state,
1151
	     "FDI TX state assertion failure (expected %s, current %s)\n",
1152
	     state_string(state), state_string(cur_state));
1153
}
1154
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1155
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1156
 
1157
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1158
			  enum pipe pipe, bool state)
1159
{
1160
	int reg;
1161
	u32 val;
1162
	bool cur_state;
1163
 
1164
	reg = FDI_RX_CTL(pipe);
1165
	val = I915_READ(reg);
1166
	cur_state = !!(val & FDI_RX_ENABLE);
1167
	WARN(cur_state != state,
1168
	     "FDI RX state assertion failure (expected %s, current %s)\n",
1169
	     state_string(state), state_string(cur_state));
1170
}
1171
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1172
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1173
 
1174
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1175
				      enum pipe pipe)
1176
{
1177
	int reg;
1178
	u32 val;
1179
 
1180
	/* ILK FDI PLL is always enabled */
1181
	if (dev_priv->info->gen == 5)
1182
		return;
1183
 
3031 serge 1184
	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
1185
	if (IS_HASWELL(dev_priv->dev))
1186
		return;
1187
 
2327 Serge 1188
	reg = FDI_TX_CTL(pipe);
1189
	val = I915_READ(reg);
1190
	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1191
}
1192
 
1193
static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
1194
				      enum pipe pipe)
1195
{
1196
	int reg;
1197
	u32 val;
1198
 
1199
	reg = FDI_RX_CTL(pipe);
1200
	val = I915_READ(reg);
1201
	WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
1202
}
1203
 
1204
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1205
				  enum pipe pipe)
1206
{
1207
	int pp_reg, lvds_reg;
1208
	u32 val;
1209
	enum pipe panel_pipe = PIPE_A;
1210
	bool locked = true;
1211
 
1212
	if (HAS_PCH_SPLIT(dev_priv->dev)) {
1213
		pp_reg = PCH_PP_CONTROL;
1214
		lvds_reg = PCH_LVDS;
1215
	} else {
1216
		pp_reg = PP_CONTROL;
1217
		lvds_reg = LVDS;
1218
	}
1219
 
1220
	val = I915_READ(pp_reg);
1221
	if (!(val & PANEL_POWER_ON) ||
1222
	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
1223
		locked = false;
1224
 
1225
	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
1226
		panel_pipe = PIPE_B;
1227
 
1228
	WARN(panel_pipe == pipe && locked,
1229
	     "panel assertion failure, pipe %c regs locked\n",
1230
	     pipe_name(pipe));
1231
}
1232
 
2342 Serge 1233
void assert_pipe(struct drm_i915_private *dev_priv,
2327 Serge 1234
			enum pipe pipe, bool state)
1235
{
1236
	int reg;
1237
	u32 val;
1238
	bool cur_state;
3243 Serge 1239
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1240
								      pipe);
2327 Serge 1241
 
3031 serge 1242
	/* if we need the pipe A quirk it must be always on */
1243
	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
1244
		state = true;
1245
 
3243 Serge 1246
	reg = PIPECONF(cpu_transcoder);
2327 Serge 1247
	val = I915_READ(reg);
1248
	cur_state = !!(val & PIPECONF_ENABLE);
1249
	WARN(cur_state != state,
1250
	     "pipe %c assertion failure (expected %s, current %s)\n",
1251
	     pipe_name(pipe), state_string(state), state_string(cur_state));
1252
}
1253
 
3031 serge 1254
static void assert_plane(struct drm_i915_private *dev_priv,
1255
			 enum plane plane, bool state)
2327 Serge 1256
{
1257
	int reg;
1258
	u32 val;
3031 serge 1259
	bool cur_state;
2327 Serge 1260
 
1261
	reg = DSPCNTR(plane);
1262
	val = I915_READ(reg);
3031 serge 1263
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1264
	WARN(cur_state != state,
1265
	     "plane %c assertion failure (expected %s, current %s)\n",
1266
	     plane_name(plane), state_string(state), state_string(cur_state));
2327 Serge 1267
}
1268
 
3031 serge 1269
#define assert_plane_enabled(d, p) assert_plane(d, p, true)
1270
#define assert_plane_disabled(d, p) assert_plane(d, p, false)
1271
 
2327 Serge 1272
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1273
				   enum pipe pipe)
1274
{
1275
	int reg, i;
1276
	u32 val;
1277
	int cur_pipe;
1278
 
1279
	/* Planes are fixed to pipes on ILK+ */
3031 serge 1280
	if (HAS_PCH_SPLIT(dev_priv->dev)) {
1281
		reg = DSPCNTR(pipe);
1282
		val = I915_READ(reg);
1283
		WARN((val & DISPLAY_PLANE_ENABLE),
1284
		     "plane %c assertion failure, should be disabled but not\n",
1285
		     plane_name(pipe));
2327 Serge 1286
		return;
3031 serge 1287
	}
2327 Serge 1288
 
1289
	/* Need to check both planes against the pipe */
1290
	for (i = 0; i < 2; i++) {
1291
		reg = DSPCNTR(i);
1292
		val = I915_READ(reg);
1293
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1294
			DISPPLANE_SEL_PIPE_SHIFT;
1295
		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1296
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
1297
		     plane_name(i), pipe_name(pipe));
1298
	}
1299
}
1300
 
1301
static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1302
{
1303
	u32 val;
1304
	bool enabled;
1305
 
3031 serge 1306
	if (HAS_PCH_LPT(dev_priv->dev)) {
1307
		DRM_DEBUG_DRIVER("LPT does not has PCH refclk, skipping check\n");
1308
		return;
1309
	}
1310
 
2327 Serge 1311
	val = I915_READ(PCH_DREF_CONTROL);
1312
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1313
			    DREF_SUPERSPREAD_SOURCE_MASK));
1314
	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1315
}
1316
 
1317
static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
1318
				       enum pipe pipe)
1319
{
1320
	int reg;
1321
	u32 val;
1322
	bool enabled;
1323
 
1324
	reg = TRANSCONF(pipe);
1325
	val = I915_READ(reg);
1326
	enabled = !!(val & TRANS_ENABLE);
1327
	WARN(enabled,
1328
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
1329
	     pipe_name(pipe));
1330
}
1331
 
1332
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1333
			    enum pipe pipe, u32 port_sel, u32 val)
1334
{
1335
	if ((val & DP_PORT_EN) == 0)
1336
		return false;
1337
 
1338
	if (HAS_PCH_CPT(dev_priv->dev)) {
1339
		u32	trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1340
		u32	trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1341
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1342
			return false;
1343
	} else {
1344
		if ((val & DP_PIPE_MASK) != (pipe << 30))
1345
			return false;
1346
	}
1347
	return true;
1348
}
1349
 
1350
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1351
			      enum pipe pipe, u32 val)
1352
{
1353
	if ((val & PORT_ENABLE) == 0)
1354
		return false;
1355
 
1356
	if (HAS_PCH_CPT(dev_priv->dev)) {
1357
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1358
			return false;
1359
	} else {
1360
		if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
1361
			return false;
1362
	}
1363
	return true;
1364
}
1365
 
1366
static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1367
			      enum pipe pipe, u32 val)
1368
{
1369
	if ((val & LVDS_PORT_EN) == 0)
1370
		return false;
1371
 
1372
	if (HAS_PCH_CPT(dev_priv->dev)) {
1373
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1374
			return false;
1375
	} else {
1376
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1377
			return false;
1378
	}
1379
	return true;
1380
}
1381
 
1382
static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1383
			      enum pipe pipe, u32 val)
1384
{
1385
	if ((val & ADPA_DAC_ENABLE) == 0)
1386
		return false;
1387
	if (HAS_PCH_CPT(dev_priv->dev)) {
1388
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1389
			return false;
1390
	} else {
1391
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1392
			return false;
1393
	}
1394
	return true;
1395
}
1396
 
1397
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1398
				   enum pipe pipe, int reg, u32 port_sel)
1399
{
1400
	u32 val = I915_READ(reg);
1401
	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1402
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1403
	     reg, pipe_name(pipe));
3031 serge 1404
 
1405
	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
1406
	     && (val & DP_PIPEB_SELECT),
1407
	     "IBX PCH dp port still using transcoder B\n");
2327 Serge 1408
}
1409
 
1410
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1411
				     enum pipe pipe, int reg)
1412
{
1413
	u32 val = I915_READ(reg);
3031 serge 1414
	WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1415
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
2327 Serge 1416
	     reg, pipe_name(pipe));
3031 serge 1417
 
1418
	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & PORT_ENABLE) == 0
1419
	     && (val & SDVO_PIPE_B_SELECT),
1420
	     "IBX PCH hdmi port still using transcoder B\n");
2327 Serge 1421
}
1422
 
1423
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1424
				      enum pipe pipe)
1425
{
1426
	int reg;
1427
	u32 val;
1428
 
1429
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1430
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1431
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1432
 
1433
	reg = PCH_ADPA;
1434
	val = I915_READ(reg);
3031 serge 1435
	WARN(adpa_pipe_enabled(dev_priv, pipe, val),
2327 Serge 1436
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
1437
	     pipe_name(pipe));
1438
 
1439
	reg = PCH_LVDS;
1440
	val = I915_READ(reg);
3031 serge 1441
	WARN(lvds_pipe_enabled(dev_priv, pipe, val),
2327 Serge 1442
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
1443
	     pipe_name(pipe));
1444
 
1445
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
1446
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
1447
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
1448
}
1449
 
1450
/**
1451
 * intel_enable_pll - enable a PLL
1452
 * @dev_priv: i915 private structure
1453
 * @pipe: pipe PLL to enable
1454
 *
1455
 * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
1456
 * make sure the PLL reg is writable first though, since the panel write
1457
 * protect mechanism may be enabled.
1458
 *
1459
 * Note!  This is for pre-ILK only.
3031 serge 1460
 *
1461
 * Unfortunately needed by dvo_ns2501 since the dvo depends on it running.
2327 Serge 1462
 */
1463
static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1464
{
1465
    int reg;
1466
    u32 val;
1467
 
1468
    /* No really, not for ILK+ */
3031 serge 1469
	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5);
2327 Serge 1470
 
1471
    /* PLL is protected by panel, make sure we can write it */
1472
    if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
1473
        assert_panel_unlocked(dev_priv, pipe);
1474
 
1475
    reg = DPLL(pipe);
1476
    val = I915_READ(reg);
1477
    val |= DPLL_VCO_ENABLE;
1478
 
1479
    /* We do this three times for luck */
1480
    I915_WRITE(reg, val);
1481
    POSTING_READ(reg);
1482
    udelay(150); /* wait for warmup */
1483
    I915_WRITE(reg, val);
1484
    POSTING_READ(reg);
1485
    udelay(150); /* wait for warmup */
1486
    I915_WRITE(reg, val);
1487
    POSTING_READ(reg);
1488
    udelay(150); /* wait for warmup */
1489
}
1490
 
1491
/**
1492
 * intel_disable_pll - disable a PLL
1493
 * @dev_priv: i915 private structure
1494
 * @pipe: pipe PLL to disable
1495
 *
1496
 * Disable the PLL for @pipe, making sure the pipe is off first.
1497
 *
1498
 * Note!  This is for pre-ILK only.
1499
 */
1500
static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1501
{
1502
	int reg;
1503
	u32 val;
1504
 
1505
	/* Don't disable pipe A or pipe A PLLs if needed */
1506
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1507
		return;
1508
 
1509
	/* Make sure the pipe isn't still relying on us */
1510
	assert_pipe_disabled(dev_priv, pipe);
1511
 
1512
	reg = DPLL(pipe);
1513
	val = I915_READ(reg);
1514
	val &= ~DPLL_VCO_ENABLE;
1515
	I915_WRITE(reg, val);
1516
	POSTING_READ(reg);
1517
}
1518
 
3031 serge 1519
/* SBI access */
1520
static void
3243 Serge 1521
intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
1522
		enum intel_sbi_destination destination)
3031 serge 1523
{
1524
	unsigned long flags;
3243 Serge 1525
	u32 tmp;
3031 serge 1526
 
1527
	spin_lock_irqsave(&dev_priv->dpio_lock, flags);
3243 Serge 1528
	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
3031 serge 1529
		DRM_ERROR("timeout waiting for SBI to become ready\n");
1530
		goto out_unlock;
1531
	}
1532
 
3243 Serge 1533
	I915_WRITE(SBI_ADDR, (reg << 16));
1534
	I915_WRITE(SBI_DATA, value);
3031 serge 1535
 
3243 Serge 1536
	if (destination == SBI_ICLK)
1537
		tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
1538
	else
1539
		tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
1540
	I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
1541
 
3031 serge 1542
	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
1543
				100)) {
1544
		DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
1545
		goto out_unlock;
1546
	}
1547
 
1548
out_unlock:
1549
	spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
1550
}
1551
 
1552
static u32
3243 Serge 1553
intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
1554
	       enum intel_sbi_destination destination)
3031 serge 1555
{
1556
	unsigned long flags;
1557
	u32 value = 0;
1558
 
1559
	spin_lock_irqsave(&dev_priv->dpio_lock, flags);
3243 Serge 1560
	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
3031 serge 1561
		DRM_ERROR("timeout waiting for SBI to become ready\n");
1562
		goto out_unlock;
1563
	}
1564
 
3243 Serge 1565
	I915_WRITE(SBI_ADDR, (reg << 16));
3031 serge 1566
 
3243 Serge 1567
	if (destination == SBI_ICLK)
1568
		value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
1569
	else
1570
		value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
1571
	I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
1572
 
3031 serge 1573
	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
1574
				100)) {
1575
		DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
1576
		goto out_unlock;
1577
	}
1578
 
1579
	value = I915_READ(SBI_DATA);
1580
 
1581
out_unlock:
1582
	spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
1583
	return value;
1584
}
1585
 
2327 Serge 1586
/**
3243 Serge 1587
 * ironlake_enable_pch_pll - enable PCH PLL
2327 Serge 1588
 * @dev_priv: i915 private structure
1589
 * @pipe: pipe PLL to enable
1590
 *
1591
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
1592
 * drives the transcoder clock.
1593
 */
3243 Serge 1594
static void ironlake_enable_pch_pll(struct intel_crtc *intel_crtc)
2327 Serge 1595
{
3031 serge 1596
	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
1597
	struct intel_pch_pll *pll;
2327 Serge 1598
	int reg;
1599
	u32 val;
1600
 
3031 serge 1601
	/* PCH PLLs only available on ILK, SNB and IVB */
1602
	BUG_ON(dev_priv->info->gen < 5);
1603
	pll = intel_crtc->pch_pll;
1604
	if (pll == NULL)
2342 Serge 1605
		return;
1606
 
3031 serge 1607
	if (WARN_ON(pll->refcount == 0))
1608
		return;
2327 Serge 1609
 
3031 serge 1610
	DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d)for crtc %d\n",
1611
		      pll->pll_reg, pll->active, pll->on,
1612
		      intel_crtc->base.base.id);
1613
 
2327 Serge 1614
	/* PCH refclock must be enabled first */
1615
	assert_pch_refclk_enabled(dev_priv);
1616
 
3031 serge 1617
	if (pll->active++ && pll->on) {
1618
		assert_pch_pll_enabled(dev_priv, pll, NULL);
1619
		return;
1620
	}
1621
 
1622
	DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg);
1623
 
1624
	reg = pll->pll_reg;
2327 Serge 1625
	val = I915_READ(reg);
1626
	val |= DPLL_VCO_ENABLE;
1627
	I915_WRITE(reg, val);
1628
	POSTING_READ(reg);
1629
	udelay(200);
3031 serge 1630
 
1631
	pll->on = true;
2327 Serge 1632
}
1633
 
3031 serge 1634
static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
2327 Serge 1635
{
3031 serge 1636
	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
1637
	struct intel_pch_pll *pll = intel_crtc->pch_pll;
2327 Serge 1638
	int reg;
3031 serge 1639
	u32 val;
2327 Serge 1640
 
1641
	/* PCH only available on ILK+ */
1642
	BUG_ON(dev_priv->info->gen < 5);
3031 serge 1643
	if (pll == NULL)
1644
	       return;
2327 Serge 1645
 
3031 serge 1646
	if (WARN_ON(pll->refcount == 0))
1647
		return;
2327 Serge 1648
 
3031 serge 1649
	DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n",
1650
		      pll->pll_reg, pll->active, pll->on,
1651
		      intel_crtc->base.base.id);
2342 Serge 1652
 
3031 serge 1653
	if (WARN_ON(pll->active == 0)) {
1654
		assert_pch_pll_disabled(dev_priv, pll, NULL);
1655
		return;
1656
	}
2342 Serge 1657
 
3031 serge 1658
	if (--pll->active) {
1659
		assert_pch_pll_enabled(dev_priv, pll, NULL);
2342 Serge 1660
		return;
3031 serge 1661
	}
2342 Serge 1662
 
3031 serge 1663
	DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg);
1664
 
1665
	/* Make sure transcoder isn't still depending on us */
1666
	assert_transcoder_disabled(dev_priv, intel_crtc->pipe);
1667
 
1668
	reg = pll->pll_reg;
2327 Serge 1669
	val = I915_READ(reg);
1670
	val &= ~DPLL_VCO_ENABLE;
1671
	I915_WRITE(reg, val);
1672
	POSTING_READ(reg);
1673
	udelay(200);
3031 serge 1674
 
1675
	pll->on = false;
2327 Serge 1676
}
1677
 
3243 Serge 1678
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
2327 Serge 1679
				    enum pipe pipe)
1680
{
3243 Serge 1681
	struct drm_device *dev = dev_priv->dev;
3031 serge 1682
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
3243 Serge 1683
	uint32_t reg, val, pipeconf_val;
2327 Serge 1684
 
1685
	/* PCH only available on ILK+ */
1686
	BUG_ON(dev_priv->info->gen < 5);
1687
 
1688
	/* Make sure PCH DPLL is enabled */
3031 serge 1689
	assert_pch_pll_enabled(dev_priv,
1690
			       to_intel_crtc(crtc)->pch_pll,
1691
			       to_intel_crtc(crtc));
2327 Serge 1692
 
1693
	/* FDI must be feeding us bits for PCH ports */
1694
	assert_fdi_tx_enabled(dev_priv, pipe);
1695
	assert_fdi_rx_enabled(dev_priv, pipe);
1696
 
3243 Serge 1697
	if (HAS_PCH_CPT(dev)) {
1698
		/* Workaround: Set the timing override bit before enabling the
1699
		 * pch transcoder. */
1700
		reg = TRANS_CHICKEN2(pipe);
1701
		val = I915_READ(reg);
1702
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1703
		I915_WRITE(reg, val);
3031 serge 1704
	}
3243 Serge 1705
 
2327 Serge 1706
	reg = TRANSCONF(pipe);
1707
	val = I915_READ(reg);
3031 serge 1708
	pipeconf_val = I915_READ(PIPECONF(pipe));
2327 Serge 1709
 
1710
	if (HAS_PCH_IBX(dev_priv->dev)) {
1711
		/*
1712
		 * make the BPC in transcoder be consistent with
1713
		 * that in pipeconf reg.
1714
		 */
1715
		val &= ~PIPE_BPC_MASK;
3031 serge 1716
		val |= pipeconf_val & PIPE_BPC_MASK;
2327 Serge 1717
	}
3031 serge 1718
 
1719
	val &= ~TRANS_INTERLACE_MASK;
1720
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1721
		if (HAS_PCH_IBX(dev_priv->dev) &&
1722
		    intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
1723
			val |= TRANS_LEGACY_INTERLACED_ILK;
1724
		else
1725
			val |= TRANS_INTERLACED;
1726
	else
1727
		val |= TRANS_PROGRESSIVE;
1728
 
2327 Serge 1729
	I915_WRITE(reg, val | TRANS_ENABLE);
1730
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1731
		DRM_ERROR("failed to enable transcoder %d\n", pipe);
1732
}
1733
 
3243 Serge 1734
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1735
				      enum transcoder cpu_transcoder)
1736
{
1737
	u32 val, pipeconf_val;
1738
 
1739
	/* PCH only available on ILK+ */
1740
	BUG_ON(dev_priv->info->gen < 5);
1741
 
1742
	/* FDI must be feeding us bits for PCH ports */
1743
	assert_fdi_tx_enabled(dev_priv, cpu_transcoder);
1744
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
1745
 
1746
	/* Workaround: set timing override bit. */
1747
	val = I915_READ(_TRANSA_CHICKEN2);
1748
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1749
	I915_WRITE(_TRANSA_CHICKEN2, val);
1750
 
1751
	val = TRANS_ENABLE;
1752
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1753
 
1754
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1755
	    PIPECONF_INTERLACED_ILK)
1756
		val |= TRANS_INTERLACED;
1757
	else
1758
		val |= TRANS_PROGRESSIVE;
1759
 
1760
	I915_WRITE(TRANSCONF(TRANSCODER_A), val);
1761
	if (wait_for(I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE, 100))
1762
		DRM_ERROR("Failed to enable PCH transcoder\n");
1763
}
1764
 
1765
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
2327 Serge 1766
				     enum pipe pipe)
1767
{
3243 Serge 1768
	struct drm_device *dev = dev_priv->dev;
1769
	uint32_t reg, val;
2327 Serge 1770
 
1771
	/* FDI relies on the transcoder */
1772
	assert_fdi_tx_disabled(dev_priv, pipe);
1773
	assert_fdi_rx_disabled(dev_priv, pipe);
1774
 
1775
	/* Ports must be off as well */
1776
	assert_pch_ports_disabled(dev_priv, pipe);
1777
 
1778
	reg = TRANSCONF(pipe);
1779
	val = I915_READ(reg);
1780
	val &= ~TRANS_ENABLE;
1781
	I915_WRITE(reg, val);
1782
	/* wait for PCH transcoder off, transcoder state */
1783
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
2342 Serge 1784
		DRM_ERROR("failed to disable transcoder %d\n", pipe);
3243 Serge 1785
 
1786
	if (!HAS_PCH_IBX(dev)) {
1787
		/* Workaround: Clear the timing override chicken bit again. */
1788
		reg = TRANS_CHICKEN2(pipe);
1789
		val = I915_READ(reg);
1790
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1791
		I915_WRITE(reg, val);
1792
	}
2327 Serge 1793
}
1794
 
3243 Serge 1795
static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1796
{
1797
	u32 val;
1798
 
1799
	val = I915_READ(_TRANSACONF);
1800
	val &= ~TRANS_ENABLE;
1801
	I915_WRITE(_TRANSACONF, val);
1802
	/* wait for PCH transcoder off, transcoder state */
1803
	if (wait_for((I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE) == 0, 50))
1804
		DRM_ERROR("Failed to disable PCH transcoder\n");
1805
 
1806
	/* Workaround: clear timing override bit. */
1807
	val = I915_READ(_TRANSA_CHICKEN2);
1808
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1809
	I915_WRITE(_TRANSA_CHICKEN2, val);
1810
}
1811
 
2327 Serge 1812
/**
1813
 * intel_enable_pipe - enable a pipe, asserting requirements
1814
 * @dev_priv: i915 private structure
1815
 * @pipe: pipe to enable
1816
 * @pch_port: on ILK+, is this pipe driving a PCH port or not
1817
 *
1818
 * Enable @pipe, making sure that various hardware specific requirements
1819
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1820
 *
1821
 * @pipe should be %PIPE_A or %PIPE_B.
1822
 *
1823
 * Will wait until the pipe is actually running (i.e. first vblank) before
1824
 * returning.
1825
 */
1826
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1827
			      bool pch_port)
1828
{
3243 Serge 1829
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1830
								      pipe);
1831
	enum transcoder pch_transcoder;
2327 Serge 1832
	int reg;
1833
	u32 val;
1834
 
3243 Serge 1835
	if (IS_HASWELL(dev_priv->dev))
1836
		pch_transcoder = TRANSCODER_A;
1837
	else
1838
		pch_transcoder = pipe;
1839
 
2327 Serge 1840
	/*
1841
	 * A pipe without a PLL won't actually be able to drive bits from
1842
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1843
	 * need the check.
1844
	 */
1845
	if (!HAS_PCH_SPLIT(dev_priv->dev))
1846
		assert_pll_enabled(dev_priv, pipe);
1847
	else {
1848
		if (pch_port) {
1849
			/* if driving the PCH, we need FDI enabled */
3243 Serge 1850
			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
1851
			assert_fdi_tx_pll_enabled(dev_priv, cpu_transcoder);
2327 Serge 1852
		}
1853
		/* FIXME: assert CPU port conditions for SNB+ */
1854
	}
1855
 
3243 Serge 1856
	reg = PIPECONF(cpu_transcoder);
2327 Serge 1857
	val = I915_READ(reg);
1858
	if (val & PIPECONF_ENABLE)
1859
		return;
1860
 
1861
	I915_WRITE(reg, val | PIPECONF_ENABLE);
1862
	intel_wait_for_vblank(dev_priv->dev, pipe);
1863
}
1864
 
1865
/**
1866
 * intel_disable_pipe - disable a pipe, asserting requirements
1867
 * @dev_priv: i915 private structure
1868
 * @pipe: pipe to disable
1869
 *
1870
 * Disable @pipe, making sure that various hardware specific requirements
1871
 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1872
 *
1873
 * @pipe should be %PIPE_A or %PIPE_B.
1874
 *
1875
 * Will wait until the pipe has shut down before returning.
1876
 */
1877
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1878
			       enum pipe pipe)
1879
{
3243 Serge 1880
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1881
								      pipe);
2327 Serge 1882
	int reg;
1883
	u32 val;
1884
 
3031 serge 1885
    /*
2327 Serge 1886
	 * Make sure planes won't keep trying to pump pixels to us,
1887
	 * or we might hang the display.
1888
	 */
1889
	assert_planes_disabled(dev_priv, pipe);
1890
 
1891
	/* Don't disable pipe A or pipe A PLLs if needed */
1892
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1893
		return;
1894
 
3243 Serge 1895
	reg = PIPECONF(cpu_transcoder);
2327 Serge 1896
	val = I915_READ(reg);
1897
	if ((val & PIPECONF_ENABLE) == 0)
1898
		return;
1899
 
1900
	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
1901
	intel_wait_for_pipe_off(dev_priv->dev, pipe);
1902
}
1903
 
1904
/*
1905
 * Plane regs are double buffered, going from enabled->disabled needs a
1906
 * trigger in order to latch.  The display address reg provides this.
1907
 */
3031 serge 1908
void intel_flush_display_plane(struct drm_i915_private *dev_priv,
2327 Serge 1909
				      enum plane plane)
1910
{
3243 Serge 1911
	if (dev_priv->info->gen >= 4)
1912
		I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1913
	else
2327 Serge 1914
		I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1915
}
1916
 
1917
/**
1918
 * intel_enable_plane - enable a display plane on a given pipe
1919
 * @dev_priv: i915 private structure
1920
 * @plane: plane to enable
1921
 * @pipe: pipe being fed
1922
 *
1923
 * Enable @plane on @pipe, making sure that @pipe is running first.
1924
 */
1925
static void intel_enable_plane(struct drm_i915_private *dev_priv,
1926
			       enum plane plane, enum pipe pipe)
1927
{
1928
	int reg;
1929
	u32 val;
1930
 
1931
	/* If the pipe isn't enabled, we can't pump pixels and may hang */
1932
	assert_pipe_enabled(dev_priv, pipe);
1933
 
1934
	reg = DSPCNTR(plane);
1935
	val = I915_READ(reg);
1936
	if (val & DISPLAY_PLANE_ENABLE)
1937
		return;
1938
 
1939
	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
1940
	intel_flush_display_plane(dev_priv, plane);
1941
	intel_wait_for_vblank(dev_priv->dev, pipe);
1942
}
1943
 
1944
/**
1945
 * intel_disable_plane - disable a display plane
1946
 * @dev_priv: i915 private structure
1947
 * @plane: plane to disable
1948
 * @pipe: pipe consuming the data
1949
 *
1950
 * Disable @plane; should be an independent operation.
1951
 */
1952
static void intel_disable_plane(struct drm_i915_private *dev_priv,
1953
				enum plane plane, enum pipe pipe)
1954
{
1955
	int reg;
1956
	u32 val;
1957
 
1958
	reg = DSPCNTR(plane);
1959
	val = I915_READ(reg);
1960
	if ((val & DISPLAY_PLANE_ENABLE) == 0)
1961
		return;
1962
 
1963
	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
1964
	intel_flush_display_plane(dev_priv, plane);
3031 serge 1965
    intel_wait_for_vblank(dev_priv->dev, pipe);
2327 Serge 1966
}
1967
 
2335 Serge 1968
int
1969
intel_pin_and_fence_fb_obj(struct drm_device *dev,
1970
			   struct drm_i915_gem_object *obj,
1971
			   struct intel_ring_buffer *pipelined)
1972
{
1973
	struct drm_i915_private *dev_priv = dev->dev_private;
1974
	u32 alignment;
1975
	int ret;
2327 Serge 1976
 
2335 Serge 1977
	switch (obj->tiling_mode) {
1978
	case I915_TILING_NONE:
1979
		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1980
			alignment = 128 * 1024;
1981
		else if (INTEL_INFO(dev)->gen >= 4)
1982
			alignment = 4 * 1024;
1983
		else
1984
			alignment = 64 * 1024;
1985
		break;
1986
	case I915_TILING_X:
1987
		/* pin() will align the object as required by fence */
1988
		alignment = 0;
1989
		break;
1990
	case I915_TILING_Y:
1991
		/* FIXME: Is this true? */
1992
		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
1993
		return -EINVAL;
1994
	default:
1995
		BUG();
1996
	}
2327 Serge 1997
 
2335 Serge 1998
	dev_priv->mm.interruptible = false;
1999
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
2000
	if (ret)
2001
		goto err_interruptible;
2327 Serge 2002
 
2335 Serge 2003
	/* Install a fence for tiled scan-out. Pre-i965 always needs a
2004
	 * fence, whereas 965+ only requires a fence if using
2005
	 * framebuffer compression.  For simplicity, we always install
2006
	 * a fence as the cost is not that onerous.
2007
	 */
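	/* Note: the fence install that the comment above describes (an
	 * i915_gem_object_get_fence() call in the upstream driver) appears to
	 * have been dropped in this port, so the err_unpin label below is
	 * currently unreachable. */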
2327 Serge 2008
 
2335 Serge 2009
	dev_priv->mm.interruptible = true;
2010
	return 0;
2327 Serge 2011
 
2335 Serge 2012
err_unpin:
2344 Serge 2013
	i915_gem_object_unpin(obj);
2335 Serge 2014
err_interruptible:
2015
	dev_priv->mm.interruptible = true;
2016
	return ret;
2017
}
2327 Serge 2018
 
3031 serge 2019
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
2020
{
2021
//	i915_gem_object_unpin_fence(obj);
2022
//	i915_gem_object_unpin(obj);
2023
}
2024
 
2025
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
2026
 * is assumed to be a power-of-two. */
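/* Worked example with hypothetical values: for bpp = 4 bytes per pixel,
 * pitch = 8192 bytes, x = 300, y = 21 the function below computes
 * tile_rows = 21 / 8 = 2 (y becomes 5) and tiles = 300 / 128 = 2 (x becomes
 * 44), returning 2 * 8192 * 8 + 2 * 4096 = 139264 bytes, i.e. whole 8-row
 * tile rows plus whole 4096-byte X tiles. */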
3243 Serge 2027
unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
3031 serge 2028
							unsigned int bpp,
2029
							unsigned int pitch)
2030
{
2031
	int tile_rows, tiles;
2032
 
2033
	tile_rows = *y / 8;
2034
	*y %= 8;
2035
	tiles = *x / (512/bpp);
2036
	*x %= 512/bpp;
2037
 
2038
	return tile_rows * pitch * 8 + tiles * 4096;
2039
}
2040
 
2327 Serge 2041
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2042
                 int x, int y)
2043
{
2044
    struct drm_device *dev = crtc->dev;
2045
    struct drm_i915_private *dev_priv = dev->dev_private;
2046
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2047
    struct intel_framebuffer *intel_fb;
2048
    struct drm_i915_gem_object *obj;
2049
    int plane = intel_crtc->plane;
3031 serge 2050
	unsigned long linear_offset;
2327 Serge 2051
    u32 dspcntr;
2052
    u32 reg;
2053
 
2054
    switch (plane) {
2055
    case 0:
2056
    case 1:
2057
        break;
2058
    default:
2059
        DRM_ERROR("Can't update plane %d in SAREA\n", plane);
2060
        return -EINVAL;
2061
    }
2062
 
2063
    intel_fb = to_intel_framebuffer(fb);
2064
    obj = intel_fb->obj;
2065
 
2066
    reg = DSPCNTR(plane);
2067
    dspcntr = I915_READ(reg);
2068
    /* Mask out pixel format bits in case we change it */
2069
    dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
3243 Serge 2070
	switch (fb->pixel_format) {
2071
	case DRM_FORMAT_C8:
2327 Serge 2072
        dspcntr |= DISPPLANE_8BPP;
2073
        break;
3243 Serge 2074
	case DRM_FORMAT_XRGB1555:
2075
	case DRM_FORMAT_ARGB1555:
2076
		dspcntr |= DISPPLANE_BGRX555;
2077
		break;
2078
	case DRM_FORMAT_RGB565:
2079
		dspcntr |= DISPPLANE_BGRX565;
2080
		break;
2081
	case DRM_FORMAT_XRGB8888:
2082
	case DRM_FORMAT_ARGB8888:
2083
		dspcntr |= DISPPLANE_BGRX888;
2084
		break;
2085
	case DRM_FORMAT_XBGR8888:
2086
	case DRM_FORMAT_ABGR8888:
2087
		dspcntr |= DISPPLANE_RGBX888;
2088
		break;
2089
	case DRM_FORMAT_XRGB2101010:
2090
	case DRM_FORMAT_ARGB2101010:
2091
		dspcntr |= DISPPLANE_BGRX101010;
2327 Serge 2092
        break;
3243 Serge 2093
	case DRM_FORMAT_XBGR2101010:
2094
	case DRM_FORMAT_ABGR2101010:
2095
		dspcntr |= DISPPLANE_RGBX101010;
2327 Serge 2096
        break;
2097
    default:
3243 Serge 2098
		DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
2327 Serge 2099
        return -EINVAL;
2100
    }
3243 Serge 2101
 
2327 Serge 2102
    if (INTEL_INFO(dev)->gen >= 4) {
2103
        if (obj->tiling_mode != I915_TILING_NONE)
2104
            dspcntr |= DISPPLANE_TILED;
2105
        else
2106
            dspcntr &= ~DISPPLANE_TILED;
2107
    }
2108
 
2109
    I915_WRITE(reg, dspcntr);
2110
 
3031 serge 2111
	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2327 Serge 2112
 
3031 serge 2113
	if (INTEL_INFO(dev)->gen >= 4) {
2114
		intel_crtc->dspaddr_offset =
3243 Serge 2115
			intel_gen4_compute_offset_xtiled(&x, &y,
3031 serge 2116
							   fb->bits_per_pixel / 8,
2117
							   fb->pitches[0]);
2118
		linear_offset -= intel_crtc->dspaddr_offset;
2119
	} else {
2120
		intel_crtc->dspaddr_offset = linear_offset;
2121
	}
2122
 
2123
	DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
2124
		      obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
2342 Serge 2125
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2327 Serge 2126
    if (INTEL_INFO(dev)->gen >= 4) {
3031 serge 2127
		I915_MODIFY_DISPBASE(DSPSURF(plane),
2128
				     obj->gtt_offset + intel_crtc->dspaddr_offset);
2327 Serge 2129
        I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
3031 serge 2130
		I915_WRITE(DSPLINOFF(plane), linear_offset);
2327 Serge 2131
    } else
3031 serge 2132
		I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset);
2327 Serge 2133
    POSTING_READ(reg);
2134
 
2135
    return 0;
2136
}
2137
 
2138
static int ironlake_update_plane(struct drm_crtc *crtc,
2139
                 struct drm_framebuffer *fb, int x, int y)
2140
{
2141
    struct drm_device *dev = crtc->dev;
2142
    struct drm_i915_private *dev_priv = dev->dev_private;
2143
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2144
    struct intel_framebuffer *intel_fb;
2145
    struct drm_i915_gem_object *obj;
2146
    int plane = intel_crtc->plane;
3031 serge 2147
	unsigned long linear_offset;
2327 Serge 2148
    u32 dspcntr;
2149
    u32 reg;
2150
 
2151
    switch (plane) {
2152
    case 0:
2153
    case 1:
2342 Serge 2154
	case 2:
2327 Serge 2155
        break;
2156
    default:
2157
        DRM_ERROR("Can't update plane %d in SAREA\n", plane);
2158
        return -EINVAL;
2159
    }
2160
 
2161
    intel_fb = to_intel_framebuffer(fb);
2162
    obj = intel_fb->obj;
2163
 
2164
    reg = DSPCNTR(plane);
2165
    dspcntr = I915_READ(reg);
2166
    /* Mask out pixel format bits in case we change it */
2167
    dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
3243 Serge 2168
	switch (fb->pixel_format) {
2169
	case DRM_FORMAT_C8:
2327 Serge 2170
        dspcntr |= DISPPLANE_8BPP;
2171
        break;
3243 Serge 2172
	case DRM_FORMAT_RGB565:
2173
		dspcntr |= DISPPLANE_BGRX565;
2327 Serge 2174
        break;
3243 Serge 2175
	case DRM_FORMAT_XRGB8888:
2176
	case DRM_FORMAT_ARGB8888:
2177
		dspcntr |= DISPPLANE_BGRX888;
2178
		break;
2179
	case DRM_FORMAT_XBGR8888:
2180
	case DRM_FORMAT_ABGR8888:
2181
		dspcntr |= DISPPLANE_RGBX888;
2182
		break;
2183
	case DRM_FORMAT_XRGB2101010:
2184
	case DRM_FORMAT_ARGB2101010:
2185
		dspcntr |= DISPPLANE_BGRX101010;
2186
		break;
2187
	case DRM_FORMAT_XBGR2101010:
2188
	case DRM_FORMAT_ABGR2101010:
2189
		dspcntr |= DISPPLANE_RGBX101010;
2327 Serge 2190
        break;
2191
    default:
3243 Serge 2192
		DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
2327 Serge 2193
        return -EINVAL;
2194
    }
2195
 
2196
//    if (obj->tiling_mode != I915_TILING_NONE)
2197
//        dspcntr |= DISPPLANE_TILED;
2198
//    else
2199
        dspcntr &= ~DISPPLANE_TILED;
2200
 
2201
    /* must disable */
2202
    dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2203
 
2204
    I915_WRITE(reg, dspcntr);
2205
 
3031 serge 2206
	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2207
	intel_crtc->dspaddr_offset =
3243 Serge 2208
		intel_gen4_compute_offset_xtiled(&x, &y,
3031 serge 2209
						   fb->bits_per_pixel / 8,
2210
						   fb->pitches[0]);
2211
	linear_offset -= intel_crtc->dspaddr_offset;
2327 Serge 2212
 
3031 serge 2213
	DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
2214
		      obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
2342 Serge 2215
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
3031 serge 2216
	I915_MODIFY_DISPBASE(DSPSURF(plane),
2217
			     obj->gtt_offset + intel_crtc->dspaddr_offset);
3243 Serge 2218
	if (IS_HASWELL(dev)) {
2219
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2220
	} else {
2330 Serge 2221
	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
3031 serge 2222
	I915_WRITE(DSPLINOFF(plane), linear_offset);
3243 Serge 2223
	}
2330 Serge 2224
	POSTING_READ(reg);
2327 Serge 2225
 
2226
    return 0;
2227
}
2228
 
2229
/* Assume fb object is pinned & idle & fenced and just update base pointers */
2230
static int
2231
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2232
			   int x, int y, enum mode_set_atomic state)
2233
{
2234
	struct drm_device *dev = crtc->dev;
2235
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 2236
 
2237
	if (dev_priv->display.disable_fbc)
2238
		dev_priv->display.disable_fbc(dev);
2239
	intel_increase_pllclock(crtc);
2240
 
2241
	return dev_priv->display.update_plane(crtc, fb, x, y);
2242
}
2243
 
2244
#if 0
2245
static int
2246
intel_finish_fb(struct drm_framebuffer *old_fb)
2247
{
2248
	struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
2249
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2250
	bool was_interruptible = dev_priv->mm.interruptible;
2327 Serge 2251
	int ret;
2252
 
3031 serge 2253
	wait_event(dev_priv->pending_flip_queue,
2254
		   atomic_read(&dev_priv->mm.wedged) ||
2255
		   atomic_read(&obj->pending_flip) == 0);
2327 Serge 2256
 
3031 serge 2257
	/* Big Hammer, we also need to ensure that any pending
2258
	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2259
	 * current scanout is retired before unpinning the old
2260
	 * framebuffer.
2261
	 *
2262
	 * This should only fail upon a hung GPU, in which case we
2263
	 * can safely continue.
2264
	 */
2265
	dev_priv->mm.interruptible = false;
2266
	ret = i915_gem_object_finish_gpu(obj);
2267
	dev_priv->mm.interruptible = was_interruptible;
2327 Serge 2268
 
3031 serge 2269
	return ret;
2327 Serge 2270
}
3031 serge 2271
#endif
2327 Serge 2272
 
2273
static int
2274
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
3031 serge 2275
		    struct drm_framebuffer *fb)
2327 Serge 2276
{
2277
	struct drm_device *dev = crtc->dev;
3031 serge 2278
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 2279
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031 serge 2280
	struct drm_framebuffer *old_fb;
2342 Serge 2281
	int ret;
2327 Serge 2282
 
2283
	/* no fb bound */
3031 serge 2284
	if (!fb) {
2327 Serge 2285
		DRM_ERROR("No FB bound\n");
2286
		return 0;
2287
	}
2288
 
3031 serge 2289
	if (intel_crtc->plane > dev_priv->num_pipe) {
2290
		DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
2291
				intel_crtc->plane,
2292
				dev_priv->num_pipe);
2327 Serge 2293
		return -EINVAL;
2294
	}
2295
 
2296
	mutex_lock(&dev->struct_mutex);
3031 serge 2297
//   ret = intel_pin_and_fence_fb_obj(dev,
2298
//                    to_intel_framebuffer(fb)->obj,
2299
//                    NULL);
2300
//   if (ret != 0) {
2301
//       mutex_unlock(&dev->struct_mutex);
2302
//       DRM_ERROR("pin & fence failed\n");
2303
//       return ret;
2304
//   }
2327 Serge 2305
 
3031 serge 2306
//   if (crtc->fb)
2307
//       intel_finish_fb(crtc->fb);
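//   Note: in this port the pin/fence and finish_fb steps above are stubbed
//   out (intel_unpin_fb_obj() is likewise a no-op here), so this path goes
//   straight to the update_plane hook below.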
2308
 
2309
	ret = dev_priv->display.update_plane(crtc, fb, x, y);
2327 Serge 2310
	if (ret) {
3031 serge 2311
		intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
2327 Serge 2312
		mutex_unlock(&dev->struct_mutex);
2313
		DRM_ERROR("failed to update base address\n");
3243 Serge 2314
        return ret;
2327 Serge 2315
	}
2316
 
3031 serge 2317
	old_fb = crtc->fb;
2318
	crtc->fb = fb;
2319
	crtc->x = x;
2320
	crtc->y = y;
2321
 
2322
	if (old_fb) {
2323
		intel_wait_for_vblank(dev, intel_crtc->pipe);
2324
		intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
2325
	}
2326
 
2327
	intel_update_fbc(dev);
2336 Serge 2328
	mutex_unlock(&dev->struct_mutex);
2327 Serge 2329
 
2336 Serge 2330
    return 0;
2327 Serge 2331
}
2332
 
2333
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
2334
{
2335
	struct drm_device *dev = crtc->dev;
2336
	struct drm_i915_private *dev_priv = dev->dev_private;
2337
	u32 dpa_ctl;
2338
 
2339
	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
2340
	dpa_ctl = I915_READ(DP_A);
2341
	dpa_ctl &= ~DP_PLL_FREQ_MASK;
2342
 
2343
	if (clock < 200000) {
2344
		u32 temp;
2345
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
2346
		/* workaround for 160MHz:
2347
		   1) program 0x4600c bits 15:0 = 0x8124
2348
		   2) program 0x46010 bit 0 = 1
2349
		   3) program 0x46034 bit 24 = 1
2350
		   4) program 0x64000 bit 14 = 1
2351
		   */
2352
		temp = I915_READ(0x4600c);
2353
		temp &= 0xffff0000;
2354
		I915_WRITE(0x4600c, temp | 0x8124);
2355
 
2356
		temp = I915_READ(0x46010);
2357
		I915_WRITE(0x46010, temp | 1);
2358
 
2359
		temp = I915_READ(0x46034);
2360
		I915_WRITE(0x46034, temp | (1 << 24));
2361
	} else {
2362
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
2363
	}
2364
	I915_WRITE(DP_A, dpa_ctl);
2365
 
2366
	POSTING_READ(DP_A);
2367
	udelay(500);
2368
}
2369
 
2370
static void intel_fdi_normal_train(struct drm_crtc *crtc)
2371
{
2372
	struct drm_device *dev = crtc->dev;
2373
	struct drm_i915_private *dev_priv = dev->dev_private;
2374
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2375
	int pipe = intel_crtc->pipe;
2376
	u32 reg, temp;
2377
 
2378
	/* enable normal train */
2379
	reg = FDI_TX_CTL(pipe);
2380
	temp = I915_READ(reg);
2381
	if (IS_IVYBRIDGE(dev)) {
2382
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2383
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
2384
	} else {
2385
		temp &= ~FDI_LINK_TRAIN_NONE;
2386
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
2387
	}
2388
	I915_WRITE(reg, temp);
2389
 
2390
	reg = FDI_RX_CTL(pipe);
2391
	temp = I915_READ(reg);
2392
	if (HAS_PCH_CPT(dev)) {
2393
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2394
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2395
	} else {
2396
		temp &= ~FDI_LINK_TRAIN_NONE;
2397
		temp |= FDI_LINK_TRAIN_NONE;
2398
	}
2399
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2400
 
2401
	/* wait one idle pattern time */
2402
	POSTING_READ(reg);
2403
	udelay(1000);
2404
 
2405
	/* IVB wants error correction enabled */
2406
	if (IS_IVYBRIDGE(dev))
2407
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2408
			   FDI_FE_ERRC_ENABLE);
2409
}
2410
 
3243 Serge 2411
static void ivb_modeset_global_resources(struct drm_device *dev)
2327 Serge 2412
{
2413
	struct drm_i915_private *dev_priv = dev->dev_private;
3243 Serge 2414
	struct intel_crtc *pipe_B_crtc =
2415
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
2416
	struct intel_crtc *pipe_C_crtc =
2417
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
2418
	uint32_t temp;
2327 Serge 2419
 
3243 Serge 2420
	/* When everything is off, disable fdi C so that we can enable fdi B
2421
	 * with all lanes. XXX: This misses the case where a pipe is not using
2422
	 * any pch resources and so doesn't need any fdi lanes. */
2423
	if (!pipe_B_crtc->base.enabled && !pipe_C_crtc->base.enabled) {
2424
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
2425
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
2426
 
2427
		temp = I915_READ(SOUTH_CHICKEN1);
2428
		temp &= ~FDI_BC_BIFURCATION_SELECT;
2429
		DRM_DEBUG_KMS("disabling fdi C rx\n");
2430
		I915_WRITE(SOUTH_CHICKEN1, temp);
2431
	}
2327 Serge 2432
}
2433
 
2434
/* The FDI link training functions for ILK/Ibexpeak. */
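/* Rough sequence shared by the trainers below: drive training pattern 1 and
 * poll FDI_RX_IIR for FDI_RX_BIT_LOCK, then switch to training pattern 2 and
 * poll for FDI_RX_SYMBOL_LOCK; the caller later switches the link to the
 * normal/idle pattern via intel_fdi_normal_train(). */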
2435
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2436
{
2437
    struct drm_device *dev = crtc->dev;
2438
    struct drm_i915_private *dev_priv = dev->dev_private;
2439
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2440
    int pipe = intel_crtc->pipe;
2441
    int plane = intel_crtc->plane;
2442
    u32 reg, temp, tries;
2443
 
2444
    /* FDI needs bits from pipe & plane first */
2445
    assert_pipe_enabled(dev_priv, pipe);
2446
    assert_plane_enabled(dev_priv, plane);
2447
 
2448
    /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
2449
       for train result */
2450
    reg = FDI_RX_IMR(pipe);
2451
    temp = I915_READ(reg);
2452
    temp &= ~FDI_RX_SYMBOL_LOCK;
2453
    temp &= ~FDI_RX_BIT_LOCK;
2454
    I915_WRITE(reg, temp);
2455
    I915_READ(reg);
2456
    udelay(150);
2457
 
2458
    /* enable CPU FDI TX and PCH FDI RX */
2459
    reg = FDI_TX_CTL(pipe);
2460
    temp = I915_READ(reg);
2461
    temp &= ~(7 << 19);
2462
    temp |= (intel_crtc->fdi_lanes - 1) << 19;
2463
    temp &= ~FDI_LINK_TRAIN_NONE;
2464
    temp |= FDI_LINK_TRAIN_PATTERN_1;
2465
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
2466
 
2467
    reg = FDI_RX_CTL(pipe);
2468
    temp = I915_READ(reg);
2469
    temp &= ~FDI_LINK_TRAIN_NONE;
2470
    temp |= FDI_LINK_TRAIN_PATTERN_1;
2471
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
2472
 
2473
    POSTING_READ(reg);
2474
    udelay(150);
2475
 
2476
    /* Ironlake workaround, enable clock pointer after FDI enable */
2477
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2478
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2479
               FDI_RX_PHASE_SYNC_POINTER_EN);
2480
 
2481
    reg = FDI_RX_IIR(pipe);
2482
    for (tries = 0; tries < 5; tries++) {
2483
        temp = I915_READ(reg);
2484
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2485
 
2486
        if ((temp & FDI_RX_BIT_LOCK)) {
2487
            DRM_DEBUG_KMS("FDI train 1 done.\n");
2488
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2489
            break;
2490
        }
2491
    }
2492
    if (tries == 5)
2493
        DRM_ERROR("FDI train 1 fail!\n");
2494
 
2495
    /* Train 2 */
2496
    reg = FDI_TX_CTL(pipe);
2497
    temp = I915_READ(reg);
2498
    temp &= ~FDI_LINK_TRAIN_NONE;
2499
    temp |= FDI_LINK_TRAIN_PATTERN_2;
2500
    I915_WRITE(reg, temp);
2501
 
2502
    reg = FDI_RX_CTL(pipe);
2503
    temp = I915_READ(reg);
2504
    temp &= ~FDI_LINK_TRAIN_NONE;
2505
    temp |= FDI_LINK_TRAIN_PATTERN_2;
2506
    I915_WRITE(reg, temp);
2507
 
2508
    POSTING_READ(reg);
2509
    udelay(150);
2510
 
2511
    reg = FDI_RX_IIR(pipe);
2512
    for (tries = 0; tries < 5; tries++) {
2513
        temp = I915_READ(reg);
2514
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2515
 
2516
        if (temp & FDI_RX_SYMBOL_LOCK) {
2517
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2518
            DRM_DEBUG_KMS("FDI train 2 done.\n");
2519
            break;
2520
        }
2521
    }
2522
    if (tries == 5)
2523
        DRM_ERROR("FDI train 2 fail!\n");
2524
 
2525
    DRM_DEBUG_KMS("FDI train done\n");
2526
 
2527
}
2528
 
2342 Serge 2529
static const int snb_b_fdi_train_param[] = {
2327 Serge 2530
    FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2531
    FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2532
    FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2533
    FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2534
};
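/* The four entries above are the voltage-swing / pre-emphasis combinations
 * that the SNB/IVB training loops below step through while waiting for bit
 * lock and symbol lock. */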
2535
 
2536
/* The FDI link training functions for SNB/Cougarpoint. */
2537
static void gen6_fdi_link_train(struct drm_crtc *crtc)
2538
{
2539
    struct drm_device *dev = crtc->dev;
2540
    struct drm_i915_private *dev_priv = dev->dev_private;
2541
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2542
    int pipe = intel_crtc->pipe;
3031 serge 2543
	u32 reg, temp, i, retry;
2327 Serge 2544
 
2545
    /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
2546
       for train result */
2547
    reg = FDI_RX_IMR(pipe);
2548
    temp = I915_READ(reg);
2549
    temp &= ~FDI_RX_SYMBOL_LOCK;
2550
    temp &= ~FDI_RX_BIT_LOCK;
2551
    I915_WRITE(reg, temp);
2552
 
2553
    POSTING_READ(reg);
2554
    udelay(150);
2555
 
2556
    /* enable CPU FDI TX and PCH FDI RX */
2557
    reg = FDI_TX_CTL(pipe);
2558
    temp = I915_READ(reg);
2559
    temp &= ~(7 << 19);
2560
    temp |= (intel_crtc->fdi_lanes - 1) << 19;
2561
    temp &= ~FDI_LINK_TRAIN_NONE;
2562
    temp |= FDI_LINK_TRAIN_PATTERN_1;
2563
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2564
    /* SNB-B */
2565
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2566
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
2567
 
3243 Serge 2568
	I915_WRITE(FDI_RX_MISC(pipe),
2569
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
2570
 
2327 Serge 2571
    reg = FDI_RX_CTL(pipe);
2572
    temp = I915_READ(reg);
2573
    if (HAS_PCH_CPT(dev)) {
2574
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2575
        temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2576
    } else {
2577
        temp &= ~FDI_LINK_TRAIN_NONE;
2578
        temp |= FDI_LINK_TRAIN_PATTERN_1;
2579
    }
2580
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
2581
 
2582
    POSTING_READ(reg);
2583
    udelay(150);
2584
 
2342 Serge 2585
	for (i = 0; i < 4; i++) {
2327 Serge 2586
        reg = FDI_TX_CTL(pipe);
2587
        temp = I915_READ(reg);
2588
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2589
        temp |= snb_b_fdi_train_param[i];
2590
        I915_WRITE(reg, temp);
2591
 
2592
        POSTING_READ(reg);
2593
        udelay(500);
2594
 
3031 serge 2595
		for (retry = 0; retry < 5; retry++) {
2327 Serge 2596
        reg = FDI_RX_IIR(pipe);
2597
        temp = I915_READ(reg);
2598
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2599
        if (temp & FDI_RX_BIT_LOCK) {
2600
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2601
            DRM_DEBUG_KMS("FDI train 1 done.\n");
2602
            break;
2603
        }
3031 serge 2604
			udelay(50);
2605
		}
2606
		if (retry < 5)
2607
			break;
2327 Serge 2608
    }
2609
    if (i == 4)
2610
        DRM_ERROR("FDI train 1 fail!\n");
2611
 
2612
    /* Train 2 */
2613
    reg = FDI_TX_CTL(pipe);
2614
    temp = I915_READ(reg);
2615
    temp &= ~FDI_LINK_TRAIN_NONE;
2616
    temp |= FDI_LINK_TRAIN_PATTERN_2;
2617
    if (IS_GEN6(dev)) {
2618
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2619
        /* SNB-B */
2620
        temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2621
    }
2622
    I915_WRITE(reg, temp);
2623
 
2624
    reg = FDI_RX_CTL(pipe);
2625
    temp = I915_READ(reg);
2626
    if (HAS_PCH_CPT(dev)) {
2627
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2628
        temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2629
    } else {
2630
        temp &= ~FDI_LINK_TRAIN_NONE;
2631
        temp |= FDI_LINK_TRAIN_PATTERN_2;
2632
    }
2633
    I915_WRITE(reg, temp);
2634
 
2635
    POSTING_READ(reg);
2636
    udelay(150);
2637
 
2342 Serge 2638
	for (i = 0; i < 4; i++) {
2327 Serge 2639
        reg = FDI_TX_CTL(pipe);
2640
        temp = I915_READ(reg);
2641
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2642
        temp |= snb_b_fdi_train_param[i];
2643
        I915_WRITE(reg, temp);
2644
 
2645
        POSTING_READ(reg);
2646
        udelay(500);
2647
 
3031 serge 2648
		for (retry = 0; retry < 5; retry++) {
2327 Serge 2649
        reg = FDI_RX_IIR(pipe);
2650
        temp = I915_READ(reg);
2651
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2652
        if (temp & FDI_RX_SYMBOL_LOCK) {
2653
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2654
            DRM_DEBUG_KMS("FDI train 2 done.\n");
2655
            break;
2656
        }
3031 serge 2657
			udelay(50);
2658
		}
2659
		if (retry < 5)
2660
			break;
2327 Serge 2661
    }
2662
    if (i == 4)
2663
        DRM_ERROR("FDI train 2 fail!\n");
2664
 
2665
    DRM_DEBUG_KMS("FDI train done.\n");
2666
}
2667
 
2668
/* Manual link training for Ivy Bridge A0 parts */
2669
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2670
{
2671
    struct drm_device *dev = crtc->dev;
2672
    struct drm_i915_private *dev_priv = dev->dev_private;
2673
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2674
    int pipe = intel_crtc->pipe;
2675
    u32 reg, temp, i;
2676
 
2677
    /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
2678
       for train result */
2679
    reg = FDI_RX_IMR(pipe);
2680
    temp = I915_READ(reg);
2681
    temp &= ~FDI_RX_SYMBOL_LOCK;
2682
    temp &= ~FDI_RX_BIT_LOCK;
2683
    I915_WRITE(reg, temp);
2684
 
2685
    POSTING_READ(reg);
2686
    udelay(150);
2687
 
3243 Serge 2688
	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
2689
		      I915_READ(FDI_RX_IIR(pipe)));
2690
 
2327 Serge 2691
    /* enable CPU FDI TX and PCH FDI RX */
2692
    reg = FDI_TX_CTL(pipe);
2693
    temp = I915_READ(reg);
2694
    temp &= ~(7 << 19);
2695
    temp |= (intel_crtc->fdi_lanes - 1) << 19;
2696
    temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
2697
    temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
2698
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2699
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2342 Serge 2700
	temp |= FDI_COMPOSITE_SYNC;
2327 Serge 2701
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
2702
 
3243 Serge 2703
	I915_WRITE(FDI_RX_MISC(pipe),
2704
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
2705
 
2327 Serge 2706
    reg = FDI_RX_CTL(pipe);
2707
    temp = I915_READ(reg);
2708
    temp &= ~FDI_LINK_TRAIN_AUTO;
2709
    temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2710
    temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2342 Serge 2711
	temp |= FDI_COMPOSITE_SYNC;
2327 Serge 2712
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
2713
 
2714
    POSTING_READ(reg);
2715
    udelay(150);
2716
 
2342 Serge 2717
	for (i = 0; i < 4; i++) {
2327 Serge 2718
        reg = FDI_TX_CTL(pipe);
2719
        temp = I915_READ(reg);
2720
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2721
        temp |= snb_b_fdi_train_param[i];
2722
        I915_WRITE(reg, temp);
2723
 
2724
        POSTING_READ(reg);
2725
        udelay(500);
2726
 
2727
        reg = FDI_RX_IIR(pipe);
2728
        temp = I915_READ(reg);
2729
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2730
 
2731
        if (temp & FDI_RX_BIT_LOCK ||
2732
            (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2733
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3243 Serge 2734
			DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i);
2327 Serge 2735
            break;
2736
        }
2737
    }
2738
    if (i == 4)
2739
        DRM_ERROR("FDI train 1 fail!\n");
2740
 
2741
    /* Train 2 */
2742
    reg = FDI_TX_CTL(pipe);
2743
    temp = I915_READ(reg);
2744
    temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2745
    temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
2746
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2747
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2748
    I915_WRITE(reg, temp);
2749
 
2750
    reg = FDI_RX_CTL(pipe);
2751
    temp = I915_READ(reg);
2752
    temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2753
    temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2754
    I915_WRITE(reg, temp);
2755
 
2756
    POSTING_READ(reg);
2757
    udelay(150);
2758
 
2342 Serge 2759
	for (i = 0; i < 4; i++) {
2327 Serge 2760
        reg = FDI_TX_CTL(pipe);
2761
        temp = I915_READ(reg);
2762
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2763
        temp |= snb_b_fdi_train_param[i];
2764
        I915_WRITE(reg, temp);
2765
 
2766
        POSTING_READ(reg);
2767
        udelay(500);
2768
 
2769
        reg = FDI_RX_IIR(pipe);
2770
        temp = I915_READ(reg);
2771
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2772
 
2773
        if (temp & FDI_RX_SYMBOL_LOCK) {
2774
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3243 Serge 2775
			DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i);
2327 Serge 2776
            break;
2777
        }
2778
    }
2779
    if (i == 4)
2780
        DRM_ERROR("FDI train 2 fail!\n");
2781
 
2782
    DRM_DEBUG_KMS("FDI train done.\n");
2783
}
2784
 
3031 serge 2785
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
2327 Serge 2786
{
3031 serge 2787
	struct drm_device *dev = intel_crtc->base.dev;
2327 Serge 2788
	struct drm_i915_private *dev_priv = dev->dev_private;
2789
	int pipe = intel_crtc->pipe;
2790
	u32 reg, temp;
2791
 
2792
 
2793
	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
2794
	reg = FDI_RX_CTL(pipe);
2795
	temp = I915_READ(reg);
2796
	temp &= ~((0x7 << 19) | (0x7 << 16));
2797
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
2798
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2799
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
2800
 
2801
	POSTING_READ(reg);
2802
	udelay(200);
2803
 
2804
	/* Switch from Rawclk to PCDclk */
2805
	temp = I915_READ(reg);
2806
	I915_WRITE(reg, temp | FDI_PCDCLK);
2807
 
2808
	POSTING_READ(reg);
2809
	udelay(200);
2810
 
3031 serge 2811
	/* On Haswell, the PLL configuration for ports and pipes is handled
2812
	 * separately, as part of DDI setup */
2813
	if (!IS_HASWELL(dev)) {
2327 Serge 2814
	/* Enable CPU FDI TX PLL, always on for Ironlake */
2815
	reg = FDI_TX_CTL(pipe);
2816
	temp = I915_READ(reg);
2817
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
2818
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
2819
 
2820
		POSTING_READ(reg);
2821
		udelay(100);
2822
	}
3031 serge 2823
	}
2327 Serge 2824
}
2825
 
3031 serge 2826
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
2827
{
2828
	struct drm_device *dev = intel_crtc->base.dev;
2829
	struct drm_i915_private *dev_priv = dev->dev_private;
2830
	int pipe = intel_crtc->pipe;
2831
	u32 reg, temp;
2832
 
2833
	/* Switch from PCDclk to Rawclk */
2834
	reg = FDI_RX_CTL(pipe);
2835
	temp = I915_READ(reg);
2836
	I915_WRITE(reg, temp & ~FDI_PCDCLK);
2837
 
2838
	/* Disable CPU FDI TX PLL */
2839
	reg = FDI_TX_CTL(pipe);
2840
	temp = I915_READ(reg);
2841
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
2842
 
2843
	POSTING_READ(reg);
2844
	udelay(100);
2845
 
2846
	reg = FDI_RX_CTL(pipe);
2847
	temp = I915_READ(reg);
2848
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
2849
 
2850
	/* Wait for the clocks to turn off. */
2851
	POSTING_READ(reg);
2852
	udelay(100);
2853
}
2854
 
2327 Serge 2855
static void ironlake_fdi_disable(struct drm_crtc *crtc)
2856
{
2857
	struct drm_device *dev = crtc->dev;
2858
	struct drm_i915_private *dev_priv = dev->dev_private;
2859
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2860
	int pipe = intel_crtc->pipe;
2861
	u32 reg, temp;
2862
 
2863
	/* disable CPU FDI tx and PCH FDI rx */
2864
	reg = FDI_TX_CTL(pipe);
2865
	temp = I915_READ(reg);
2866
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2867
	POSTING_READ(reg);
2868
 
2869
	reg = FDI_RX_CTL(pipe);
2870
	temp = I915_READ(reg);
2871
	temp &= ~(0x7 << 16);
2872
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2873
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2874
 
2875
	POSTING_READ(reg);
2876
	udelay(100);
2877
 
2878
	/* Ironlake workaround, disable clock pointer after downing FDI */
2879
	if (HAS_PCH_IBX(dev)) {
2880
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2881
	}
2882
 
2883
	/* still set train pattern 1 */
2884
	reg = FDI_TX_CTL(pipe);
2885
	temp = I915_READ(reg);
2886
	temp &= ~FDI_LINK_TRAIN_NONE;
2887
	temp |= FDI_LINK_TRAIN_PATTERN_1;
2888
	I915_WRITE(reg, temp);
2889
 
2890
	reg = FDI_RX_CTL(pipe);
2891
	temp = I915_READ(reg);
2892
	if (HAS_PCH_CPT(dev)) {
2893
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2894
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2895
	} else {
2896
		temp &= ~FDI_LINK_TRAIN_NONE;
2897
		temp |= FDI_LINK_TRAIN_PATTERN_1;
2898
	}
2899
	/* BPC in FDI rx is consistent with that in PIPECONF */
2900
	temp &= ~(0x07 << 16);
2901
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2902
	I915_WRITE(reg, temp);
2903
 
2904
	POSTING_READ(reg);
2905
	udelay(100);
2906
}
2907
 
3031 serge 2908
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2327 Serge 2909
{
3031 serge 2910
	struct drm_device *dev = crtc->dev;
2327 Serge 2911
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 2912
	unsigned long flags;
2913
	bool pending;
2327 Serge 2914
 
3031 serge 2915
	if (atomic_read(&dev_priv->mm.wedged))
2916
		return false;
2327 Serge 2917
 
3031 serge 2918
	spin_lock_irqsave(&dev->event_lock, flags);
2919
	pending = to_intel_crtc(crtc)->unpin_work != NULL;
2920
	spin_unlock_irqrestore(&dev->event_lock, flags);
2921
 
2922
	return pending;
2327 Serge 2923
}
2924
 
3031 serge 2925
#if 0
2327 Serge 2926
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2927
{
3031 serge 2928
	struct drm_device *dev = crtc->dev;
2929
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 2930
 
2931
	if (crtc->fb == NULL)
2932
		return;
2933
 
2360 Serge 2934
	wait_event(dev_priv->pending_flip_queue,
3031 serge 2935
		   !intel_crtc_has_pending_flip(crtc));
2936
 
2937
	mutex_lock(&dev->struct_mutex);
2938
	intel_finish_fb(crtc->fb);
2939
	mutex_unlock(&dev->struct_mutex);
2327 Serge 2940
}
3031 serge 2941
#endif
2327 Serge 2942
 
3243 Serge 2943
static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc)
2327 Serge 2944
{
2945
	struct drm_device *dev = crtc->dev;
3031 serge 2946
	struct intel_encoder *intel_encoder;
2327 Serge 2947
 
2948
	/*
2949
	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2950
	 * must be driven by its own crtc; no sharing is possible.
2951
	 */
3031 serge 2952
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
2953
		switch (intel_encoder->type) {
2327 Serge 2954
		case INTEL_OUTPUT_EDP:
3031 serge 2955
			if (!intel_encoder_is_pch_edp(&intel_encoder->base))
2327 Serge 2956
				return false;
2957
			continue;
2958
		}
2959
	}
2960
 
2961
	return true;
2962
}
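/* On Haswell only the analog/CRT output is still routed through the PCH
 * transcoder (digital outputs stay on the CPU/DDI side), which is why
 * haswell_crtc_driving_pch() below just checks for an analog encoder. */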
2963
 
3243 Serge 2964
static bool haswell_crtc_driving_pch(struct drm_crtc *crtc)
2965
{
2966
	return intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG);
2967
}
2968
 
3031 serge 2969
/* Program iCLKIP clock to the desired frequency */
2970
static void lpt_program_iclkip(struct drm_crtc *crtc)
2971
{
2972
	struct drm_device *dev = crtc->dev;
2973
	struct drm_i915_private *dev_priv = dev->dev_private;
2974
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
2975
	u32 temp;
2976
 
2977
	/* It is necessary to ungate the pixclk gate prior to programming
2978
	 * the divisors, and gate it back when it is done.
2979
	 */
2980
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
2981
 
2982
	/* Disable SSCCTL */
2983
	intel_sbi_write(dev_priv, SBI_SSCCTL6,
3243 Serge 2984
			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
2985
				SBI_SSCCTL_DISABLE,
2986
			SBI_ICLK);
3031 serge 2987
 
2988
	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
2989
	if (crtc->mode.clock == 20000) {
2990
		auxdiv = 1;
2991
		divsel = 0x41;
2992
		phaseinc = 0x20;
2993
	} else {
2994
		/* The iCLK virtual clock root frequency is in MHz,
2995
		 * but the crtc->mode.clock is in kHz. To get the divisors,
2996
		 * it is necessary to divide one by another, so we
2997
		 * convert the virtual clock precision to KHz here for higher
2998
		 * precision.
2999
		 */
3000
		u32 iclk_virtual_root_freq = 172800 * 1000;
3001
		u32 iclk_pi_range = 64;
3002
		u32 desired_divisor, msb_divisor_value, pi_value;
3003
 
3004
		desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
3005
		msb_divisor_value = desired_divisor / iclk_pi_range;
3006
		pi_value = desired_divisor % iclk_pi_range;
3007
 
3008
		auxdiv = 0;
3009
		divsel = msb_divisor_value - 2;
3010
		phaseinc = pi_value;
3011
	}
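	/* Worked example with a hypothetical mode: for crtc->mode.clock ==
	 * 108000 kHz, desired_divisor = 172800000 / 108000 = 1600, hence
	 * msb_divisor_value = 25 and pi_value = 0, giving auxdiv = 0,
	 * divsel = 25 - 2 = 23 and phaseinc = 0. */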
3012
 
3013
	/* This should not happen with any sane values */
3014
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
3015
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
3016
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
3017
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);
3018
 
3019
	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
3020
			crtc->mode.clock,
3021
			auxdiv,
3022
			divsel,
3023
			phasedir,
3024
			phaseinc);
3025
 
3026
	/* Program SSCDIVINTPHASE6 */
3243 Serge 3027
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3031 serge 3028
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
3029
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
3030
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
3031
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
3032
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
3033
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
3243 Serge 3034
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
3031 serge 3035
 
3036
	/* Program SSCAUXDIV */
3243 Serge 3037
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3031 serge 3038
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
3039
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
3243 Serge 3040
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
3031 serge 3041
 
3042
	/* Enable modulator and associated divider */
3243 Serge 3043
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3031 serge 3044
	temp &= ~SBI_SSCCTL_DISABLE;
3243 Serge 3045
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3031 serge 3046
 
3047
	/* Wait for initialization time */
3048
	udelay(24);
3049
 
3050
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
3051
}
3052
 
2327 Serge 3053
/*
3054
 * Enable PCH resources required for PCH ports:
3055
 *   - PCH PLLs
3056
 *   - FDI training & RX/TX
3057
 *   - update transcoder timings
3058
 *   - DP transcoding bits
3059
 *   - transcoder
3060
 */
3061
static void ironlake_pch_enable(struct drm_crtc *crtc)
3062
{
3063
	struct drm_device *dev = crtc->dev;
3064
	struct drm_i915_private *dev_priv = dev->dev_private;
3065
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3066
	int pipe = intel_crtc->pipe;
3031 serge 3067
	u32 reg, temp;
2327 Serge 3068
 
3031 serge 3069
	assert_transcoder_disabled(dev_priv, pipe);
3070
 
3243 Serge 3071
	/* Write the TU size bits before fdi link training, so that error
3072
	 * detection works. */
3073
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
3074
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
3075
 
2327 Serge 3076
	/* For PCH output, training FDI link */
3077
	dev_priv->display.fdi_link_train(crtc);
3078
 
3243 Serge 3079
	/* XXX: pch pll's can be enabled any time before we enable the PCH
3080
	 * transcoder, and we actually should do this to not upset any PCH
3081
	 * transcoder that already use the clock when we share it.
3082
	 *
3083
	 * Note that enable_pch_pll tries to do the right thing, but get_pch_pll
3084
	 * unconditionally resets the pll - we need that to have the right LVDS
3085
	 * enable sequence. */
3086
	ironlake_enable_pch_pll(intel_crtc);
2327 Serge 3087
 
3243 Serge 3088
	if (HAS_PCH_CPT(dev)) {
3031 serge 3089
		u32 sel;
2342 Serge 3090
 
2327 Serge 3091
		temp = I915_READ(PCH_DPLL_SEL);
3031 serge 3092
		switch (pipe) {
3093
		default:
3094
		case 0:
3095
			temp |= TRANSA_DPLL_ENABLE;
3096
			sel = TRANSA_DPLLB_SEL;
3097
			break;
3098
		case 1:
3099
			temp |= TRANSB_DPLL_ENABLE;
3100
			sel = TRANSB_DPLLB_SEL;
3101
			break;
3102
		case 2:
3103
			temp |= TRANSC_DPLL_ENABLE;
3104
			sel = TRANSC_DPLLB_SEL;
3105
			break;
2342 Serge 3106
		}
3031 serge 3107
		if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B)
3108
			temp |= sel;
3109
		else
3110
			temp &= ~sel;
2327 Serge 3111
		I915_WRITE(PCH_DPLL_SEL, temp);
3112
	}
3113
 
3114
	/* set transcoder timing, panel must allow it */
3115
	assert_panel_unlocked(dev_priv, pipe);
3116
	I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
3117
	I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
3118
	I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));
3119
 
3120
	I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
3121
	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
3122
	I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
3031 serge 3123
	I915_WRITE(TRANS_VSYNCSHIFT(pipe),  I915_READ(VSYNCSHIFT(pipe)));
2327 Serge 3124
 
3125
	intel_fdi_normal_train(crtc);
3126
 
3127
	/* For PCH DP, enable TRANS_DP_CTL */
3128
	if (HAS_PCH_CPT(dev) &&
2342 Serge 3129
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
3130
	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2327 Serge 3131
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
3132
		reg = TRANS_DP_CTL(pipe);
3133
		temp = I915_READ(reg);
3134
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
3135
			  TRANS_DP_SYNC_MASK |
3136
			  TRANS_DP_BPC_MASK);
3137
		temp |= (TRANS_DP_OUTPUT_ENABLE |
3138
			 TRANS_DP_ENH_FRAMING);
3139
		temp |= bpc << 9; /* same format but at 11:9 */
3140
 
3141
		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
3142
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
3143
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
3144
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
3145
 
3146
		switch (intel_trans_dp_port_sel(crtc)) {
3147
		case PCH_DP_B:
3148
			temp |= TRANS_DP_PORT_SEL_B;
3149
			break;
3150
		case PCH_DP_C:
3151
			temp |= TRANS_DP_PORT_SEL_C;
3152
			break;
3153
		case PCH_DP_D:
3154
			temp |= TRANS_DP_PORT_SEL_D;
3155
			break;
3156
		default:
3243 Serge 3157
			BUG();
2327 Serge 3158
		}
3159
 
3160
		I915_WRITE(reg, temp);
3161
	}
3162
 
3243 Serge 3163
	ironlake_enable_pch_transcoder(dev_priv, pipe);
2327 Serge 3164
}
3165
 
3243 Serge 3166
static void lpt_pch_enable(struct drm_crtc *crtc)
3167
{
3168
	struct drm_device *dev = crtc->dev;
3169
	struct drm_i915_private *dev_priv = dev->dev_private;
3170
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3171
	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
3172
 
3173
	assert_transcoder_disabled(dev_priv, TRANSCODER_A);
3174
 
3175
	lpt_program_iclkip(crtc);
3176
 
3177
	/* Set transcoder timing. */
3178
	I915_WRITE(_TRANS_HTOTAL_A, I915_READ(HTOTAL(cpu_transcoder)));
3179
	I915_WRITE(_TRANS_HBLANK_A, I915_READ(HBLANK(cpu_transcoder)));
3180
	I915_WRITE(_TRANS_HSYNC_A,  I915_READ(HSYNC(cpu_transcoder)));
3181
 
3182
	I915_WRITE(_TRANS_VTOTAL_A, I915_READ(VTOTAL(cpu_transcoder)));
3183
	I915_WRITE(_TRANS_VBLANK_A, I915_READ(VBLANK(cpu_transcoder)));
3184
	I915_WRITE(_TRANS_VSYNC_A,  I915_READ(VSYNC(cpu_transcoder)));
3185
	I915_WRITE(_TRANS_VSYNCSHIFT_A, I915_READ(VSYNCSHIFT(cpu_transcoder)));
3186
 
3187
	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
3188
}
3189
 
3031 serge 3190
static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
3191
{
3192
	struct intel_pch_pll *pll = intel_crtc->pch_pll;
3193
 
3194
	if (pll == NULL)
3195
		return;
3196
 
3197
	if (pll->refcount == 0) {
3198
		WARN(1, "bad PCH PLL refcount\n");
3199
		return;
3200
	}
3201
 
3202
	--pll->refcount;
3203
	intel_crtc->pch_pll = NULL;
3204
}
3205
 
3206
static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp)
3207
{
3208
	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
3209
	struct intel_pch_pll *pll;
3210
	int i;
3211
 
3212
	pll = intel_crtc->pch_pll;
3213
	if (pll) {
3214
		DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n",
3215
			      intel_crtc->base.base.id, pll->pll_reg);
3216
		goto prepare;
3217
	}
3218
 
3219
	if (HAS_PCH_IBX(dev_priv->dev)) {
3220
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
3221
		i = intel_crtc->pipe;
3222
		pll = &dev_priv->pch_plls[i];
3223
 
3224
		DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n",
3225
			      intel_crtc->base.base.id, pll->pll_reg);
3226
 
3227
		goto found;
3228
	}
3229
 
3230
	for (i = 0; i < dev_priv->num_pch_pll; i++) {
3231
		pll = &dev_priv->pch_plls[i];
3232
 
3233
		/* Only want to check enabled timings first */
3234
		if (pll->refcount == 0)
3235
			continue;
3236
 
3237
		if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) &&
3238
		    fp == I915_READ(pll->fp0_reg)) {
3239
			DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, ative %d)\n",
3240
				      intel_crtc->base.base.id,
3241
				      pll->pll_reg, pll->refcount, pll->active);
3242
 
3243
			goto found;
3244
		}
3245
	}
3246
 
3247
	/* Ok no matching timings, maybe there's a free one? */
3248
	for (i = 0; i < dev_priv->num_pch_pll; i++) {
3249
		pll = &dev_priv->pch_plls[i];
3250
		if (pll->refcount == 0) {
3251
			DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n",
3252
				      intel_crtc->base.base.id, pll->pll_reg);
3253
			goto found;
3254
		}
3255
	}
3256
 
3257
	return NULL;
3258
 
3259
found:
3260
	intel_crtc->pch_pll = pll;
3261
	pll->refcount++;
3262
	DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe);
3263
prepare: /* separate function? */
3264
	DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg);
3265
 
3266
	/* Wait for the clocks to stabilize before rewriting the regs */
3267
	I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
3268
	POSTING_READ(pll->pll_reg);
3269
	udelay(150);
3270
 
3271
	I915_WRITE(pll->fp0_reg, fp);
3272
	I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
3273
	pll->on = false;
3274
	return pll;
3275
}
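/* intel_cpt_verify_modeset() below sanity-checks a CPT mode set by watching
 * the pipe's scanline counter (PIPEDSL): on a running pipe the readback
 * should change within the two 5 ms waits, otherwise the pipe is reported
 * as stuck. */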
3276
 
2342 Serge 3277
void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
3278
{
3279
	struct drm_i915_private *dev_priv = dev->dev_private;
3243 Serge 3280
	int dslreg = PIPEDSL(pipe);
2342 Serge 3281
	u32 temp;
3282
 
3283
	temp = I915_READ(dslreg);
3284
	udelay(500);
3285
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
3286
		if (wait_for(I915_READ(dslreg) != temp, 5))
3287
			DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
3288
	}
3289
}
3290
 
2327 Serge 3291
static void ironlake_crtc_enable(struct drm_crtc *crtc)
3292
{
3293
    struct drm_device *dev = crtc->dev;
3294
    struct drm_i915_private *dev_priv = dev->dev_private;
3295
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031 serge 3296
	struct intel_encoder *encoder;
2327 Serge 3297
    int pipe = intel_crtc->pipe;
3298
    int plane = intel_crtc->plane;
3299
    u32 temp;
3300
    bool is_pch_port;
3301
 
3031 serge 3302
	WARN_ON(!crtc->enabled);
3303
 
2327 Serge 3304
    if (intel_crtc->active)
3305
        return;
3306
 
3307
    intel_crtc->active = true;
3308
    intel_update_watermarks(dev);
3309
 
3310
    if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
3311
        temp = I915_READ(PCH_LVDS);
3312
        if ((temp & LVDS_PORT_EN) == 0)
3313
            I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
3314
    }
3315
 
3243 Serge 3316
	is_pch_port = ironlake_crtc_driving_pch(crtc);
2327 Serge 3317
 
3031 serge 3318
	if (is_pch_port) {
3243 Serge 3319
		/* Note: FDI PLL enabling _must_ be done before we enable the
3320
		 * cpu pipes, hence this is separate from all the other fdi/pch
3321
		 * enabling. */
3031 serge 3322
		ironlake_fdi_pll_enable(intel_crtc);
3323
	} else {
3324
		assert_fdi_tx_disabled(dev_priv, pipe);
3325
		assert_fdi_rx_disabled(dev_priv, pipe);
3326
	}
2327 Serge 3327
 
3031 serge 3328
	for_each_encoder_on_crtc(dev, crtc, encoder)
3329
		if (encoder->pre_enable)
3330
			encoder->pre_enable(encoder);
3331
 
2327 Serge 3332
    /* Enable panel fitting for LVDS */
3333
    if (dev_priv->pch_pf_size &&
3243 Serge 3334
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
3335
	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2327 Serge 3336
        /* Force use of hard-coded filter coefficients
3337
         * as some pre-programmed values are broken,
3338
         * e.g. x201.
3339
         */
3243 Serge 3340
		if (IS_IVYBRIDGE(dev))
3341
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
3342
						 PF_PIPE_SEL_IVB(pipe));
3343
		else
2327 Serge 3344
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
3345
        I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
3346
        I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
3347
    }
3348
 
3349
    /*
3350
     * On ILK+ LUT must be loaded before the pipe is running but with
3351
     * clocks enabled
3352
     */
3353
    intel_crtc_load_lut(crtc);
3354
 
3355
    intel_enable_pipe(dev_priv, pipe, is_pch_port);
3356
    intel_enable_plane(dev_priv, plane, pipe);
3357
 
3358
    if (is_pch_port)
3359
        ironlake_pch_enable(crtc);
3360
 
3361
    mutex_lock(&dev->struct_mutex);
3362
    intel_update_fbc(dev);
3363
    mutex_unlock(&dev->struct_mutex);
3364
 
3365
//    intel_crtc_update_cursor(crtc, true);
3031 serge 3366
 
3367
	for_each_encoder_on_crtc(dev, crtc, encoder)
3368
		encoder->enable(encoder);
3369
 
3370
	if (HAS_PCH_CPT(dev))
3371
		intel_cpt_verify_modeset(dev, intel_crtc->pipe);
3372
 
3373
	/*
3374
	 * There seems to be a race in PCH platform hw (at least on some
3375
	 * outputs) where an enabled pipe still completes any pageflip right
3376
	 * away (as if the pipe is off) instead of waiting for vblank. As soon
3377
	 * as the first vblank happened, everything works as expected. Hence just
3378
	 * wait for one vblank before returning to avoid strange things
3379
	 * happening.
3380
	 */
3381
	intel_wait_for_vblank(dev, intel_crtc->pipe);
2327 Serge 3382
}
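/* haswell_crtc_enable() mirrors the ironlake path above, but routes the
 * output through the DDI blocks: pipe clock and transcoder function are
 * enabled via the intel_ddi_* helpers and the LPT PCH is used instead of
 * IBX/CPT.
 */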
3383
 
3243 Serge 3384
static void haswell_crtc_enable(struct drm_crtc *crtc)
3385
{
3386
	struct drm_device *dev = crtc->dev;
3387
	struct drm_i915_private *dev_priv = dev->dev_private;
3388
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3389
	struct intel_encoder *encoder;
3390
	int pipe = intel_crtc->pipe;
3391
	int plane = intel_crtc->plane;
3392
	bool is_pch_port;
3393
 
3394
	WARN_ON(!crtc->enabled);
3395
 
3396
	if (intel_crtc->active)
3397
		return;
3398
 
3399
	intel_crtc->active = true;
3400
	intel_update_watermarks(dev);
3401
 
3402
	is_pch_port = haswell_crtc_driving_pch(crtc);
3403
 
3404
	if (is_pch_port)
3405
		dev_priv->display.fdi_link_train(crtc);
3406
 
3407
	for_each_encoder_on_crtc(dev, crtc, encoder)
3408
		if (encoder->pre_enable)
3409
			encoder->pre_enable(encoder);
3410
 
3411
	intel_ddi_enable_pipe_clock(intel_crtc);
3412
 
3413
	/* Enable panel fitting for eDP */
3414
	if (dev_priv->pch_pf_size &&
3415
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
3416
		/* Force use of hard-coded filter coefficients
3417
		 * as some pre-programmed values are broken,
3418
		 * e.g. x201.
3419
		 */
3420
		I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
3421
					 PF_PIPE_SEL_IVB(pipe));
3422
		I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
3423
		I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
3424
	}
3425
 
3426
	/*
3427
	 * On ILK+ LUT must be loaded before the pipe is running but with
3428
	 * clocks enabled
3429
	 */
3430
	intel_crtc_load_lut(crtc);
3431
 
3432
	intel_ddi_set_pipe_settings(crtc);
3433
	intel_ddi_enable_pipe_func(crtc);
3434
 
3435
	intel_enable_pipe(dev_priv, pipe, is_pch_port);
3436
	intel_enable_plane(dev_priv, plane, pipe);
3437
 
3438
	if (is_pch_port)
3439
		lpt_pch_enable(crtc);
3440
 
3441
	mutex_lock(&dev->struct_mutex);
3442
	intel_update_fbc(dev);
3443
	mutex_unlock(&dev->struct_mutex);
3444
 
3445
//	intel_crtc_update_cursor(crtc, true);
3446
 
3447
	for_each_encoder_on_crtc(dev, crtc, encoder)
3448
		encoder->enable(encoder);
3449
 
3450
	/*
3451
	 * There seems to be a race in PCH platform hw (at least on some
3452
	 * outputs) where an enabled pipe still completes any pageflip right
3453
	 * away (as if the pipe is off) instead of waiting for vblank. As soon
3454
	 * as the first vblank happened, everything works as expected. Hence just
3455
	 * wait for one vblank before returning to avoid strange things
3456
	 * happening.
3457
	 */
3458
	intel_wait_for_vblank(dev, intel_crtc->pipe);
3459
}
3460
 
2327 Serge 3461
static void ironlake_crtc_disable(struct drm_crtc *crtc)
3462
{
3463
    struct drm_device *dev = crtc->dev;
3464
    struct drm_i915_private *dev_priv = dev->dev_private;
3465
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031 serge 3466
	struct intel_encoder *encoder;
2327 Serge 3467
    int pipe = intel_crtc->pipe;
3468
    int plane = intel_crtc->plane;
3469
    u32 reg, temp;
3470
 
3031 serge 3471
 
2327 Serge 3472
    if (!intel_crtc->active)
3473
        return;
3474
 
3031 serge 3475
	for_each_encoder_on_crtc(dev, crtc, encoder)
3476
		encoder->disable(encoder);
2336 Serge 3477
 
3031 serge 3478
//    intel_crtc_wait_for_pending_flips(crtc);
2327 Serge 3479
//    drm_vblank_off(dev, pipe);
3480
//    intel_crtc_update_cursor(crtc, false);
3481
 
3482
    intel_disable_plane(dev_priv, plane, pipe);
3483
 
3484
    if (dev_priv->cfb_plane == plane)
3485
        intel_disable_fbc(dev);
3486
 
3487
    intel_disable_pipe(dev_priv, pipe);
3488
 
3489
    /* Disable PF */
3490
    I915_WRITE(PF_CTL(pipe), 0);
3491
    I915_WRITE(PF_WIN_SZ(pipe), 0);
3492
 
3031 serge 3493
	for_each_encoder_on_crtc(dev, crtc, encoder)
3494
		if (encoder->post_disable)
3495
			encoder->post_disable(encoder);
3496
 
2327 Serge 3497
    ironlake_fdi_disable(crtc);
3498
 
3243 Serge 3499
	ironlake_disable_pch_transcoder(dev_priv, pipe);
2327 Serge 3500
 
3501
    if (HAS_PCH_CPT(dev)) {
3502
        /* disable TRANS_DP_CTL */
3503
        reg = TRANS_DP_CTL(pipe);
3504
        temp = I915_READ(reg);
3505
        temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
3506
        temp |= TRANS_DP_PORT_SEL_NONE;
3507
        I915_WRITE(reg, temp);
3508
 
3509
        /* disable DPLL_SEL */
3510
        temp = I915_READ(PCH_DPLL_SEL);
3511
        switch (pipe) {
3512
        case 0:
2342 Serge 3513
			temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
2327 Serge 3514
            break;
3515
        case 1:
3516
            temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
3517
            break;
3518
        case 2:
2342 Serge 3519
			/* C shares PLL A or B */
2327 Serge 3520
            temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
3521
            break;
3522
        default:
3523
            BUG(); /* wtf */
3524
        }
3525
        I915_WRITE(PCH_DPLL_SEL, temp);
3526
    }
3527
 
3528
    /* disable PCH DPLL */
3031 serge 3529
	intel_disable_pch_pll(intel_crtc);
2327 Serge 3530
 
3031 serge 3531
	ironlake_fdi_pll_disable(intel_crtc);
2327 Serge 3532
 
3533
    intel_crtc->active = false;
3534
    intel_update_watermarks(dev);
3535
 
3536
    mutex_lock(&dev->struct_mutex);
3537
    intel_update_fbc(dev);
3538
    mutex_unlock(&dev->struct_mutex);
3539
}
3540
 
3243 Serge 3541
static void haswell_crtc_disable(struct drm_crtc *crtc)
3542
{
3543
	struct drm_device *dev = crtc->dev;
3544
	struct drm_i915_private *dev_priv = dev->dev_private;
3545
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3546
	struct intel_encoder *encoder;
3547
	int pipe = intel_crtc->pipe;
3548
	int plane = intel_crtc->plane;
3549
	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
3550
	bool is_pch_port;
3551
 
3552
	if (!intel_crtc->active)
3553
		return;
3554
 
3555
	is_pch_port = haswell_crtc_driving_pch(crtc);
3556
 
3557
	for_each_encoder_on_crtc(dev, crtc, encoder)
3558
		encoder->disable(encoder);
3559
 
3560
 
3561
	intel_disable_plane(dev_priv, plane, pipe);
3562
 
3563
	if (dev_priv->cfb_plane == plane)
3564
		intel_disable_fbc(dev);
3565
 
3566
	intel_disable_pipe(dev_priv, pipe);
3567
 
3568
	intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
3569
 
3570
	/* Disable PF */
3571
	I915_WRITE(PF_CTL(pipe), 0);
3572
	I915_WRITE(PF_WIN_SZ(pipe), 0);
3573
 
3574
	intel_ddi_disable_pipe_clock(intel_crtc);
3575
 
3576
	for_each_encoder_on_crtc(dev, crtc, encoder)
3577
		if (encoder->post_disable)
3578
			encoder->post_disable(encoder);
3579
 
3580
	if (is_pch_port) {
3581
		lpt_disable_pch_transcoder(dev_priv);
3582
		intel_ddi_fdi_disable(crtc);
3583
	}
3584
 
3585
	intel_crtc->active = false;
3586
	intel_update_watermarks(dev);
3587
 
3588
	mutex_lock(&dev->struct_mutex);
3589
	intel_update_fbc(dev);
3590
	mutex_unlock(&dev->struct_mutex);
3591
}
3592
 
3031 serge 3593
static void ironlake_crtc_off(struct drm_crtc *crtc)
2327 Serge 3594
{
3595
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031 serge 3596
	intel_put_pch_pll(intel_crtc);
2327 Serge 3597
}
3598
 
3243 Serge 3599
static void haswell_crtc_off(struct drm_crtc *crtc)
3600
{
3601
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3602
 
3603
	/* Stop saying we're using TRANSCODER_EDP because some other CRTC might
3604
	 * start using it. */
3605
	intel_crtc->cpu_transcoder = intel_crtc->pipe;
3606
 
3607
	intel_ddi_put_crtc_pll(crtc);
3608
}
3609
 
2327 Serge 3610
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3611
{
3612
	if (!enable && intel_crtc->overlay) {
3613
		struct drm_device *dev = intel_crtc->base.dev;
3614
		struct drm_i915_private *dev_priv = dev->dev_private;
3615
 
3616
		mutex_lock(&dev->struct_mutex);
3617
		dev_priv->mm.interruptible = false;
3618
//       (void) intel_overlay_switch_off(intel_crtc->overlay);
3619
		dev_priv->mm.interruptible = true;
3620
		mutex_unlock(&dev->struct_mutex);
3621
	}
3622
 
3623
	/* Let userspace switch the overlay on again. In most cases userspace
3624
	 * has to recompute where to put it anyway.
3625
	 */
3626
}
3627
 
3628
static void i9xx_crtc_enable(struct drm_crtc *crtc)
3629
{
3630
    struct drm_device *dev = crtc->dev;
3631
    struct drm_i915_private *dev_priv = dev->dev_private;
3632
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031 serge 3633
	struct intel_encoder *encoder;
2327 Serge 3634
    int pipe = intel_crtc->pipe;
3635
    int plane = intel_crtc->plane;
3636
 
3031 serge 3637
	WARN_ON(!crtc->enabled);
3638
 
2327 Serge 3639
    if (intel_crtc->active)
3640
        return;
3641
 
3642
    intel_crtc->active = true;
3643
    intel_update_watermarks(dev);
3644
 
3645
    intel_enable_pll(dev_priv, pipe);
3646
    intel_enable_pipe(dev_priv, pipe, false);
3647
    intel_enable_plane(dev_priv, plane, pipe);
3648
 
3649
    intel_crtc_load_lut(crtc);
3650
    intel_update_fbc(dev);
3651
 
3652
    /* Give the overlay scaler a chance to enable if it's on this pipe */
3653
    intel_crtc_dpms_overlay(intel_crtc, true);
3654
//    intel_crtc_update_cursor(crtc, true);
3031 serge 3655
 
3656
	for_each_encoder_on_crtc(dev, crtc, encoder)
3657
		encoder->enable(encoder);
2327 Serge 3658
}
3659
 
3660
static void i9xx_crtc_disable(struct drm_crtc *crtc)
3661
{
3662
    struct drm_device *dev = crtc->dev;
3663
    struct drm_i915_private *dev_priv = dev->dev_private;
3664
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3031 serge 3665
	struct intel_encoder *encoder;
2327 Serge 3666
    int pipe = intel_crtc->pipe;
3667
    int plane = intel_crtc->plane;
3668
 
3031 serge 3669
 
2327 Serge 3670
    if (!intel_crtc->active)
3671
        return;
3672
 
3031 serge 3673
	for_each_encoder_on_crtc(dev, crtc, encoder)
3674
		encoder->disable(encoder);
3675
 
2327 Serge 3676
    /* Give the overlay scaler a chance to disable if it's on this pipe */
3031 serge 3677
//    intel_crtc_wait_for_pending_flips(crtc);
2327 Serge 3678
//    drm_vblank_off(dev, pipe);
3679
    intel_crtc_dpms_overlay(intel_crtc, false);
3680
//    intel_crtc_update_cursor(crtc, false);
3681
 
3682
    if (dev_priv->cfb_plane == plane)
3683
        intel_disable_fbc(dev);
3684
 
3685
    intel_disable_plane(dev_priv, plane, pipe);
3686
    intel_disable_pipe(dev_priv, pipe);
3687
    intel_disable_pll(dev_priv, pipe);
3688
 
3689
    intel_crtc->active = false;
3690
    intel_update_fbc(dev);
3691
    intel_update_watermarks(dev);
3692
}
3693
 
3031 serge 3694
static void i9xx_crtc_off(struct drm_crtc *crtc)
2327 Serge 3695
{
3696
}
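/* Note: the SAREA update below is compiled out (#if 0) in this port, so
 * intel_crtc_update_sarea() is effectively a no-op here.
 */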
3697
 
3031 serge 3698
static void intel_crtc_update_sarea(struct drm_crtc *crtc,
3699
				    bool enabled)
2330 Serge 3700
{
3701
	struct drm_device *dev = crtc->dev;
3702
	struct drm_i915_master_private *master_priv;
3703
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3704
	int pipe = intel_crtc->pipe;
2327 Serge 3705
 
3706
 
2340 Serge 3707
#if 0
2330 Serge 3708
	if (!dev->primary->master)
3709
		return;
2327 Serge 3710
 
2330 Serge 3711
	master_priv = dev->primary->master->driver_priv;
3712
	if (!master_priv->sarea_priv)
3713
		return;
2327 Serge 3714
 
2330 Serge 3715
	switch (pipe) {
3716
	case 0:
3717
		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
3718
		master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
3719
		break;
3720
	case 1:
3721
		master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
3722
		master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
3723
		break;
3724
	default:
3725
		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
3726
		break;
3727
	}
2340 Serge 3728
#endif
3729
 
2330 Serge 3730
}
2327 Serge 3731
 
3031 serge 3732
/**
3733
 * Sets the power management mode of the pipe and plane.
3734
 */
3735
void intel_crtc_update_dpms(struct drm_crtc *crtc)
3736
{
3737
	struct drm_device *dev = crtc->dev;
3738
	struct drm_i915_private *dev_priv = dev->dev_private;
3739
	struct intel_encoder *intel_encoder;
3740
	bool enable = false;
3741
 
3742
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
3743
		enable |= intel_encoder->connectors_active;
3744
 
3745
	if (enable)
3746
		dev_priv->display.crtc_enable(crtc);
3747
	else
3748
		dev_priv->display.crtc_disable(crtc);
3749
 
3750
	intel_crtc_update_sarea(crtc, enable);
3751
}
3752
 
3753
static void intel_crtc_noop(struct drm_crtc *crtc)
3754
{
3755
}
3756
 
2330 Serge 3757
static void intel_crtc_disable(struct drm_crtc *crtc)
3758
{
3759
	struct drm_device *dev = crtc->dev;
3031 serge 3760
	struct drm_connector *connector;
3761
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 3762
 
3031 serge 3763
	/* crtc should still be enabled when we disable it. */
3764
	WARN_ON(!crtc->enabled);
2327 Serge 3765
 
3031 serge 3766
	dev_priv->display.crtc_disable(crtc);
3767
	intel_crtc_update_sarea(crtc, false);
3768
	dev_priv->display.off(crtc);
3769
 
3770
	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
3771
	assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
3772
 
3773
//	if (crtc->fb) {
3774
//		mutex_lock(&dev->struct_mutex);
3775
//		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
3776
//		mutex_unlock(&dev->struct_mutex);
3777
//		crtc->fb = NULL;
3778
//	}
3779
 
3780
	/* Update computed state. */
3781
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
3782
		if (!connector->encoder || !connector->encoder->crtc)
3783
			continue;
3784
 
3785
		if (connector->encoder->crtc != crtc)
3786
			continue;
3787
 
3788
		connector->dpms = DRM_MODE_DPMS_OFF;
3789
		to_intel_encoder(connector->encoder)->connectors_active = false;
2330 Serge 3790
	}
3791
}
2327 Serge 3792
 
3031 serge 3793
void intel_modeset_disable(struct drm_device *dev)
2330 Serge 3794
{
3031 serge 3795
	struct drm_crtc *crtc;
3796
 
3797
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3798
		if (crtc->enabled)
3799
			intel_crtc_disable(crtc);
3800
	}
2330 Serge 3801
}
2327 Serge 3802
 
3031 serge 3803
void intel_encoder_noop(struct drm_encoder *encoder)
2330 Serge 3804
{
3805
}
2327 Serge 3806
 
3031 serge 3807
void intel_encoder_destroy(struct drm_encoder *encoder)
2330 Serge 3808
{
3031 serge 3809
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3810
 
3811
	drm_encoder_cleanup(encoder);
3812
	kfree(intel_encoder);
2330 Serge 3813
}
2327 Serge 3814
 
3031 serge 3815
/* Simple dpms helper for encoders with just one connector, no cloning and only
3816
 * one kind of off state. It clamps all !ON modes to fully OFF and changes the
3817
 * state of the entire output pipe. */
3818
void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
2330 Serge 3819
{
3031 serge 3820
	if (mode == DRM_MODE_DPMS_ON) {
3821
		encoder->connectors_active = true;
3822
 
3823
		intel_crtc_update_dpms(encoder->base.crtc);
3824
	} else {
3825
		encoder->connectors_active = false;
3826
 
3827
		intel_crtc_update_dpms(encoder->base.crtc);
3828
	}
2330 Serge 3829
}
2327 Serge 3830
 
3031 serge 3831
/* Cross check the actual hw state with our own modeset state tracking (and it's
3832
 * internal consistency). */
3833
static void intel_connector_check_state(struct intel_connector *connector)
2330 Serge 3834
{
3031 serge 3835
	if (connector->get_hw_state(connector)) {
3836
		struct intel_encoder *encoder = connector->encoder;
3837
		struct drm_crtc *crtc;
3838
		bool encoder_enabled;
3839
		enum pipe pipe;
3840
 
3841
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
3842
			      connector->base.base.id,
3843
			      drm_get_connector_name(&connector->base));
3844
 
3845
		WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
3846
		     "wrong connector dpms state\n");
3847
		WARN(connector->base.encoder != &encoder->base,
3848
		     "active connector not linked to encoder\n");
3849
		WARN(!encoder->connectors_active,
3850
		     "encoder->connectors_active not set\n");
3851
 
3852
		encoder_enabled = encoder->get_hw_state(encoder, &pipe);
3853
		WARN(!encoder_enabled, "encoder not enabled\n");
3854
		if (WARN_ON(!encoder->base.crtc))
3855
			return;
3856
 
3857
		crtc = encoder->base.crtc;
3858
 
3859
		WARN(!crtc->enabled, "crtc not enabled\n");
3860
		WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
3861
		WARN(pipe != to_intel_crtc(crtc)->pipe,
3862
		     "encoder active on the wrong pipe\n");
3863
	}
2330 Serge 3864
}
2327 Serge 3865
 
3031 serge 3866
/* Even simpler default implementation, if there's really no special case to
3867
 * consider. */
3868
void intel_connector_dpms(struct drm_connector *connector, int mode)
2330 Serge 3869
{
3031 serge 3870
	struct intel_encoder *encoder = intel_attached_encoder(connector);
2342 Serge 3871
 
3031 serge 3872
	/* All the simple cases only support two dpms states. */
3873
	if (mode != DRM_MODE_DPMS_ON)
3874
		mode = DRM_MODE_DPMS_OFF;
2342 Serge 3875
 
3031 serge 3876
	if (mode == connector->dpms)
3877
		return;
3878
 
3879
	connector->dpms = mode;
3880
 
3881
	/* Only need to change hw state when actually enabled */
3882
	if (encoder->base.crtc)
3883
		intel_encoder_dpms(encoder, mode);
3884
	else
3885
		WARN_ON(encoder->connectors_active != false);
3886
 
3887
	intel_modeset_check_state(connector->dev);
2330 Serge 3888
}
2327 Serge 3889
 
3031 serge 3890
/* Simple connector->get_hw_state implementation for encoders that support only
3891
 * one connector and no cloning and hence the encoder state determines the state
3892
 * of the connector. */
3893
bool intel_connector_get_hw_state(struct intel_connector *connector)
2330 Serge 3894
{
3031 serge 3895
	enum pipe pipe = 0;
3896
	struct intel_encoder *encoder = connector->encoder;
2330 Serge 3897
 
3031 serge 3898
	return encoder->get_hw_state(encoder, &pipe);
2330 Serge 3899
}
3900
 
3901
static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3031 serge 3902
				  const struct drm_display_mode *mode,
2330 Serge 3903
				  struct drm_display_mode *adjusted_mode)
3904
{
3905
	struct drm_device *dev = crtc->dev;
3906
 
3907
	if (HAS_PCH_SPLIT(dev)) {
3908
		/* FDI link clock is fixed at 2.7G; reject pixel clocks above 4/3 of it */
3909
		if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
3910
			return false;
3911
	}
3912
 
3031 serge 3913
	/* All interlaced capable intel hw wants timings in frames. Note though
3914
	 * that intel_lvds_mode_fixup does some funny tricks with the crtc
3915
	 * timings, so we need to be careful not to clobber these.*/
3916
	if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
2330 Serge 3917
		drm_mode_set_crtcinfo(adjusted_mode, 0);
3918
 
3031 serge 3919
	/* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes
3920
	 * with a hsync front porch of 0.
3921
	 */
3922
	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
3923
		adjusted_mode->hsync_start == adjusted_mode->hdisplay)
3924
		return false;
3925
 
2330 Serge 3926
	return true;
3927
}
3928
 
3031 serge 3929
static int valleyview_get_display_clock_speed(struct drm_device *dev)
3930
{
3931
	return 400000; /* FIXME */
3932
}
3933
 
2327 Serge 3934
static int i945_get_display_clock_speed(struct drm_device *dev)
3935
{
3936
	return 400000;
3937
}
3938
 
3939
static int i915_get_display_clock_speed(struct drm_device *dev)
3940
{
3941
	return 333000;
3942
}
3943
 
3944
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
3945
{
3946
	return 200000;
3947
}
3948
 
3949
static int i915gm_get_display_clock_speed(struct drm_device *dev)
3950
{
3951
	u16 gcfgc = 0;
3952
 
3953
	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3954
 
3955
	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
3956
		return 133000;
3957
	else {
3958
		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
3959
		case GC_DISPLAY_CLOCK_333_MHZ:
3960
			return 333000;
3961
		default:
3962
		case GC_DISPLAY_CLOCK_190_200_MHZ:
3963
			return 190000;
3964
		}
3965
	}
3966
}
3967
 
3968
static int i865_get_display_clock_speed(struct drm_device *dev)
3969
{
3970
	return 266000;
3971
}
3972
 
3973
static int i855_get_display_clock_speed(struct drm_device *dev)
3974
{
3975
	u16 hpllcc = 0;
3976
	/* Assume that the hardware is in the high speed state.  This
3977
	 * should be the default.
3978
	 */
3979
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
3980
	case GC_CLOCK_133_200:
3981
	case GC_CLOCK_100_200:
3982
		return 200000;
3983
	case GC_CLOCK_166_250:
3984
		return 250000;
3985
	case GC_CLOCK_100_133:
3986
		return 133000;
3987
	}
3988
 
3989
	/* Shouldn't happen */
3990
	return 0;
3991
}
3992
 
3993
static int i830_get_display_clock_speed(struct drm_device *dev)
3994
{
3995
	return 133000;
3996
}
3997
 
3998
struct fdi_m_n {
3999
    u32        tu;
4000
    u32        gmch_m;
4001
    u32        gmch_n;
4002
    u32        link_m;
4003
    u32        link_n;
4004
};
4005
 
4006
static void
4007
fdi_reduce_ratio(u32 *num, u32 *den)
4008
{
4009
	while (*num > 0xffffff || *den > 0xffffff) {
4010
		*num >>= 1;
4011
		*den >>= 1;
4012
	}
4013
}
4014
 
4015
static void
4016
ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
4017
		     int link_clock, struct fdi_m_n *m_n)
4018
{
4019
	m_n->tu = 64; /* default size */
4020
 
4021
	/* BUG_ON(pixel_clock > INT_MAX / 36); */
4022
	m_n->gmch_m = bits_per_pixel * pixel_clock;
4023
	m_n->gmch_n = link_clock * nlanes * 8;
4024
	fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
4025
 
4026
	m_n->link_m = pixel_clock;
4027
	m_n->link_n = link_clock;
4028
	fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
4029
}
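/* Illustrative example (numbers assumed, not taken from any particular
 * platform): a 24 bpp mode with a 148,500 kHz pixel clock over 4 FDI lanes
 * at a 270,000 kHz link clock gives gmch_m/gmch_n = 3,564,000/8,640,000 and
 * link_m/link_n = 148,500/270,000; both already fit in 24 bits, so
 * fdi_reduce_ratio() leaves them untouched.
 */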
4030
 
4031
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4032
{
2342 Serge 4033
	if (i915_panel_use_ssc >= 0)
4034
		return i915_panel_use_ssc != 0;
4035
	return dev_priv->lvds_use_ssc
2327 Serge 4036
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4037
}
4038
 
4039
/**
4040
 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
4041
 * @crtc: CRTC structure
2342 Serge 4042
 * @mode: requested mode
2327 Serge 4043
 *
4044
 * A pipe may be connected to one or more outputs.  Based on the depth of the
4045
 * attached framebuffer, choose a good color depth to use on the pipe.
4046
 *
4047
 * If possible, match the pipe depth to the fb depth.  In some cases, this
4048
 * isn't ideal, because the connected output supports a lesser or restricted
4049
 * set of depths.  Resolve that here:
4050
 *    LVDS typically supports only 6bpc, so clamp down in that case
4051
 *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
4052
 *    Displays may support a restricted set as well, check EDID and clamp as
4053
 *      appropriate.
2342 Serge 4054
 *    DP may want to dither down to 6bpc to fit larger modes
2327 Serge 4055
 *
4056
 * RETURNS:
4057
 * Dithering requirement (i.e. false if display bpc and pipe bpc match,
4058
 * true if they don't match).
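 *
 * For instance, a depth-24 framebuffer driving an LVDS panel running in
 * 18-bit (6 bpc) mode ends up with *pipe_bpp = 18 and a return value of
 * true, i.e. the pipe must dither.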
4059
 */
4060
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
3031 serge 4061
					 struct drm_framebuffer *fb,
2342 Serge 4062
					 unsigned int *pipe_bpp,
4063
					 struct drm_display_mode *mode)
2327 Serge 4064
{
4065
	struct drm_device *dev = crtc->dev;
4066
	struct drm_i915_private *dev_priv = dev->dev_private;
4067
	struct drm_connector *connector;
3031 serge 4068
	struct intel_encoder *intel_encoder;
2327 Serge 4069
	unsigned int display_bpc = UINT_MAX, bpc;
4070
 
4071
	/* Walk the encoders & connectors on this crtc, get min bpc */
3031 serge 4072
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
2327 Serge 4073
 
4074
		if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
4075
			unsigned int lvds_bpc;
4076
 
4077
			if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
4078
			    LVDS_A3_POWER_UP)
4079
				lvds_bpc = 8;
4080
			else
4081
				lvds_bpc = 6;
4082
 
4083
			if (lvds_bpc < display_bpc) {
2342 Serge 4084
				DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
2327 Serge 4085
				display_bpc = lvds_bpc;
4086
			}
4087
			continue;
4088
		}
4089
 
4090
		/* Not one of the known troublemakers, check the EDID */
4091
		list_for_each_entry(connector, &dev->mode_config.connector_list,
4092
				    head) {
3031 serge 4093
			if (connector->encoder != &intel_encoder->base)
2327 Serge 4094
				continue;
4095
 
4096
			/* Don't use an invalid EDID bpc value */
4097
			if (connector->display_info.bpc &&
4098
			    connector->display_info.bpc < display_bpc) {
2342 Serge 4099
				DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
2327 Serge 4100
				display_bpc = connector->display_info.bpc;
3031 serge 4101
        }
4102
    }
2327 Serge 4103
 
3120 serge 4104
		if (intel_encoder->type == INTEL_OUTPUT_EDP) {
4105
			/* Use VBT settings if we have an eDP panel */
4106
			unsigned int edp_bpc = dev_priv->edp.bpp / 3;
4107
 
3243 Serge 4108
			if (edp_bpc && edp_bpc < display_bpc) {
3120 serge 4109
				DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
4110
				display_bpc = edp_bpc;
4111
			}
4112
			continue;
4113
		}
4114
 
2327 Serge 4115
		/*
4116
		 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
4117
		 * through, clamp it down.  (Note: >12bpc will be caught below.)
4118
		 */
4119
		if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
4120
			if (display_bpc > 8 && display_bpc < 12) {
2342 Serge 4121
				DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
2327 Serge 4122
				display_bpc = 12;
4123
			} else {
2342 Serge 4124
				DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
2327 Serge 4125
				display_bpc = 8;
4126
			}
4127
		}
4128
	}
4129
 
2342 Serge 4130
	if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
4131
		DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
4132
		display_bpc = 6;
4133
	}
4134
 
2327 Serge 4135
	/*
4136
	 * We could just drive the pipe at the highest bpc all the time and
4137
	 * enable dithering as needed, but that costs bandwidth.  So choose
4138
	 * the minimum value that expresses the full color range of the fb but
4139
	 * also stays within the max display bpc discovered above.
4140
	 */
4141
 
3031 serge 4142
	switch (fb->depth) {
2327 Serge 4143
	case 8:
4144
		bpc = 8; /* since we go through a colormap */
4145
		break;
4146
	case 15:
4147
	case 16:
4148
		bpc = 6; /* min is 18bpp */
4149
		break;
4150
	case 24:
2342 Serge 4151
		bpc = 8;
2327 Serge 4152
		break;
4153
	case 30:
2342 Serge 4154
		bpc = 10;
2327 Serge 4155
		break;
4156
	case 48:
2342 Serge 4157
		bpc = 12;
2327 Serge 4158
		break;
4159
	default:
4160
		DRM_DEBUG("unsupported depth, assuming 24 bits\n");
4161
		bpc = min((unsigned int)8, display_bpc);
4162
		break;
4163
	}
4164
 
2342 Serge 4165
	display_bpc = min(display_bpc, bpc);
4166
 
4167
	DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
3031 serge 4168
		      bpc, display_bpc);
2327 Serge 4169
 
2342 Serge 4170
	*pipe_bpp = display_bpc * 3;
2327 Serge 4171
 
4172
	return display_bpc != bpc;
4173
}
4174
 
3031 serge 4175
static int vlv_get_refclk(struct drm_crtc *crtc)
2327 Serge 4176
{
3031 serge 4177
	struct drm_device *dev = crtc->dev;
4178
	struct drm_i915_private *dev_priv = dev->dev_private;
4179
	int refclk = 27000; /* for DP & HDMI */
2327 Serge 4180
 
3031 serge 4181
	return 100000; /* only one validated so far */
2327 Serge 4182
 
3031 serge 4183
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
4184
		refclk = 96000;
4185
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
4186
		if (intel_panel_use_ssc(dev_priv))
4187
			refclk = 100000;
4188
		else
4189
			refclk = 96000;
4190
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
4191
		refclk = 100000;
4192
	}
2327 Serge 4193
 
3031 serge 4194
	return refclk;
4195
}
2327 Serge 4196
 
3031 serge 4197
static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
4198
{
4199
	struct drm_device *dev = crtc->dev;
4200
	struct drm_i915_private *dev_priv = dev->dev_private;
4201
	int refclk;
2327 Serge 4202
 
3031 serge 4203
	if (IS_VALLEYVIEW(dev)) {
4204
		refclk = vlv_get_refclk(crtc);
4205
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4206
	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4207
		refclk = dev_priv->lvds_ssc_freq * 1000;
4208
		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
4209
			      refclk / 1000);
4210
	} else if (!IS_GEN2(dev)) {
4211
		refclk = 96000;
4212
	} else {
4213
		refclk = 48000;
4214
	}
2327 Serge 4215
 
3031 serge 4216
	return refclk;
4217
}
2327 Serge 4218
 
3031 serge 4219
static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
4220
				      intel_clock_t *clock)
4221
{
4222
	/* SDVO TV has fixed PLL values that depend on its clock range;
4223
	   this mirrors the vbios setting. */
4224
	if (adjusted_mode->clock >= 100000
4225
	    && adjusted_mode->clock < 140500) {
4226
		clock->p1 = 2;
4227
		clock->p2 = 10;
4228
		clock->n = 3;
4229
		clock->m1 = 16;
4230
		clock->m2 = 8;
4231
	} else if (adjusted_mode->clock >= 140500
4232
		   && adjusted_mode->clock <= 200000) {
4233
		clock->p1 = 1;
4234
		clock->p2 = 10;
4235
		clock->n = 6;
4236
		clock->m1 = 12;
4237
		clock->m2 = 8;
4238
	}
4239
}
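/* i9xx_update_pll_dividers() packs the FP0/FP1 divisor registers as
 * (N << 16) | (M1 << 8) | M2, with Pineview encoding N as (1 << n).
 * FP1 only receives the reduced (downclocked) dividers when an LVDS panel
 * with a usable downclock is present and powersave is enabled; otherwise
 * it mirrors FP0.
 */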
2327 Serge 4240
 
3031 serge 4241
static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
4242
				     intel_clock_t *clock,
4243
				     intel_clock_t *reduced_clock)
4244
{
4245
	struct drm_device *dev = crtc->dev;
4246
	struct drm_i915_private *dev_priv = dev->dev_private;
4247
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4248
	int pipe = intel_crtc->pipe;
4249
	u32 fp, fp2 = 0;
2327 Serge 4250
 
3031 serge 4251
	if (IS_PINEVIEW(dev)) {
4252
		fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
4253
		if (reduced_clock)
4254
			fp2 = (1 << reduced_clock->n) << 16 |
4255
				reduced_clock->m1 << 8 | reduced_clock->m2;
4256
	} else {
4257
		fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
4258
		if (reduced_clock)
4259
			fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
4260
				reduced_clock->m2;
4261
	}
2327 Serge 4262
 
3031 serge 4263
	I915_WRITE(FP0(pipe), fp);
2327 Serge 4264
 
3031 serge 4265
	intel_crtc->lowfreq_avail = false;
4266
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4267
	    reduced_clock && i915_powersave) {
4268
		I915_WRITE(FP1(pipe), fp2);
4269
		intel_crtc->lowfreq_avail = true;
4270
	} else {
4271
		I915_WRITE(FP1(pipe), fp);
4272
	}
4273
}
2327 Serge 4274
 
3031 serge 4275
static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
4276
			      struct drm_display_mode *adjusted_mode)
4277
{
4278
	struct drm_device *dev = crtc->dev;
4279
	struct drm_i915_private *dev_priv = dev->dev_private;
4280
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4281
	int pipe = intel_crtc->pipe;
4282
	u32 temp;
2327 Serge 4283
 
3031 serge 4284
	temp = I915_READ(LVDS);
4285
	temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
4286
	if (pipe == 1) {
4287
		temp |= LVDS_PIPEB_SELECT;
4288
	} else {
4289
		temp &= ~LVDS_PIPEB_SELECT;
4290
	}
4291
	/* set the corresponding LVDS_BORDER bit */
4292
	temp |= dev_priv->lvds_border_bits;
4293
	/* Set the B0-B3 data pairs corresponding to whether we're going to
4294
	 * set the DPLLs for dual-channel mode or not.
4295
	 */
4296
	if (clock->p2 == 7)
4297
		temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
4298
	else
4299
		temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
2327 Serge 4300
 
3031 serge 4301
	/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
4302
	 * appropriately here, but we need to look more thoroughly into how
4303
	 * panels behave in the two modes.
4304
	 */
4305
	/* set the dithering flag on LVDS as needed */
4306
	if (INTEL_INFO(dev)->gen >= 4) {
4307
		if (dev_priv->lvds_dither)
4308
			temp |= LVDS_ENABLE_DITHER;
4309
		else
4310
			temp &= ~LVDS_ENABLE_DITHER;
4311
	}
4312
	temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
4313
	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
4314
		temp |= LVDS_HSYNC_POLARITY;
4315
	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
4316
		temp |= LVDS_VSYNC_POLARITY;
4317
	I915_WRITE(LVDS, temp);
4318
}
2327 Serge 4319
 
3031 serge 4320
static void vlv_update_pll(struct drm_crtc *crtc,
4321
			   struct drm_display_mode *mode,
4322
			   struct drm_display_mode *adjusted_mode,
4323
			   intel_clock_t *clock, intel_clock_t *reduced_clock,
3243 Serge 4324
			   int num_connectors)
3031 serge 4325
{
4326
	struct drm_device *dev = crtc->dev;
4327
	struct drm_i915_private *dev_priv = dev->dev_private;
4328
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4329
	int pipe = intel_crtc->pipe;
4330
	u32 dpll, mdiv, pdiv;
4331
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
3243 Serge 4332
	bool is_sdvo;
4333
	u32 temp;
2327 Serge 4334
 
3243 Serge 4335
	is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
4336
		intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
2327 Serge 4337
 
3243 Serge 4338
	dpll = DPLL_VGA_MODE_DIS;
4339
	dpll |= DPLL_EXT_BUFFER_ENABLE_VLV;
4340
	dpll |= DPLL_REFA_CLK_ENABLE_VLV;
4341
	dpll |= DPLL_INTEGRATED_CLOCK_VLV;
4342
 
4343
	I915_WRITE(DPLL(pipe), dpll);
4344
	POSTING_READ(DPLL(pipe));
4345
 
3031 serge 4346
	bestn = clock->n;
4347
	bestm1 = clock->m1;
4348
	bestm2 = clock->m2;
4349
	bestp1 = clock->p1;
4350
	bestp2 = clock->p2;
4351
 
3243 Serge 4352
	/*
4353
	 * On Valleyview, the PLL and lane counter registers are programmed
4354
	 * through the DPIO interface.
4355
	 */
3031 serge 4356
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
4357
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
4358
	mdiv |= ((bestn << DPIO_N_SHIFT));
4359
	mdiv |= (1 << DPIO_POST_DIV_SHIFT);
4360
	mdiv |= (1 << DPIO_K_SHIFT);
4361
	mdiv |= DPIO_ENABLE_CALIBRATION;
4362
	intel_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
4363
 
4364
	intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000);
4365
 
3243 Serge 4366
	pdiv = (1 << DPIO_REFSEL_OVERRIDE) | (5 << DPIO_PLL_MODESEL_SHIFT) |
3031 serge 4367
		(3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) |
3243 Serge 4368
		(7 << DPIO_PLL_REFCLK_SEL_SHIFT) | (8 << DPIO_DRIVER_CTL_SHIFT) |
4369
		(5 << DPIO_CLK_BIAS_CTL_SHIFT);
3031 serge 4370
	intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv);
4371
 
3243 Serge 4372
	intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f003b);
3031 serge 4373
 
4374
	dpll |= DPLL_VCO_ENABLE;
4375
	I915_WRITE(DPLL(pipe), dpll);
4376
	POSTING_READ(DPLL(pipe));
4377
	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
4378
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
4379
 
3243 Serge 4380
	intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620);
3031 serge 4381
 
3243 Serge 4382
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
4383
		intel_dp_set_m_n(crtc, mode, adjusted_mode);
4384
 
4385
	I915_WRITE(DPLL(pipe), dpll);
4386
 
4387
	/* Wait for the clocks to stabilize. */
4388
	POSTING_READ(DPLL(pipe));
4389
	udelay(150);
4390
 
4391
	temp = 0;
4392
	if (is_sdvo) {
4393
		temp = intel_mode_get_pixel_multiplier(adjusted_mode);
3031 serge 4394
		if (temp > 1)
4395
			temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
4396
		else
4397
			temp = 0;
3243 Serge 4398
	}
3031 serge 4399
	I915_WRITE(DPLL_MD(pipe), temp);
4400
	POSTING_READ(DPLL_MD(pipe));
3243 Serge 4401
 
4402
	/* Now program lane control registers */
4403
	if(intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)
4404
			|| intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
4405
	{
4406
		temp = 0x1000C4;
4407
		if(pipe == 1)
4408
			temp |= (1 << 21);
4409
		intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp);
3031 serge 4410
	}
3243 Serge 4411
	if(intel_pipe_has_type(crtc,INTEL_OUTPUT_EDP))
4412
	{
4413
		temp = 0x1000C4;
4414
		if(pipe == 1)
4415
			temp |= (1 << 21);
4416
		intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);
4417
	}
3031 serge 4418
}
4419
 
4420
static void i9xx_update_pll(struct drm_crtc *crtc,
4421
			    struct drm_display_mode *mode,
4422
			    struct drm_display_mode *adjusted_mode,
4423
			    intel_clock_t *clock, intel_clock_t *reduced_clock,
4424
			    int num_connectors)
4425
{
4426
	struct drm_device *dev = crtc->dev;
4427
	struct drm_i915_private *dev_priv = dev->dev_private;
4428
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4429
	int pipe = intel_crtc->pipe;
4430
	u32 dpll;
4431
	bool is_sdvo;
4432
 
3243 Serge 4433
	i9xx_update_pll_dividers(crtc, clock, reduced_clock);
4434
 
3031 serge 4435
	is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
4436
		intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
4437
 
4438
	dpll = DPLL_VGA_MODE_DIS;
4439
 
4440
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
4441
		dpll |= DPLLB_MODE_LVDS;
4442
	else
4443
		dpll |= DPLLB_MODE_DAC_SERIAL;
4444
	if (is_sdvo) {
4445
		int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4446
		if (pixel_multiplier > 1) {
4447
			if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4448
				dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
2342 Serge 4449
		}
3031 serge 4450
		dpll |= DPLL_DVO_HIGH_SPEED;
2342 Serge 4451
	}
3031 serge 4452
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
4453
		dpll |= DPLL_DVO_HIGH_SPEED;
2342 Serge 4454
 
3031 serge 4455
	/* compute bitmask from p1 value */
4456
	if (IS_PINEVIEW(dev))
4457
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
4458
	else {
4459
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4460
		if (IS_G4X(dev) && reduced_clock)
4461
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
4462
	}
4463
	switch (clock->p2) {
4464
	case 5:
4465
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
4466
		break;
4467
	case 7:
4468
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
4469
		break;
4470
	case 10:
4471
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
4472
		break;
4473
	case 14:
4474
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
4475
		break;
4476
	}
4477
	if (INTEL_INFO(dev)->gen >= 4)
4478
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
2327 Serge 4479
 
3031 serge 4480
	if (is_sdvo && intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
4481
		dpll |= PLL_REF_INPUT_TVCLKINBC;
4482
	else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
4483
		/* XXX: just matching BIOS for now */
4484
		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
4485
		dpll |= 3;
4486
	else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4487
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4488
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4489
	else
4490
		dpll |= PLL_REF_INPUT_DREFCLK;
2327 Serge 4491
 
3031 serge 4492
	dpll |= DPLL_VCO_ENABLE;
4493
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
4494
	POSTING_READ(DPLL(pipe));
4495
	udelay(150);
2327 Serge 4496
 
3031 serge 4497
	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
4498
	 * This is an exception to the general rule that mode_set doesn't turn
4499
	 * things on.
4500
	 */
4501
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
4502
		intel_update_lvds(crtc, clock, adjusted_mode);
2327 Serge 4503
 
3031 serge 4504
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
4505
		intel_dp_set_m_n(crtc, mode, adjusted_mode);
2327 Serge 4506
 
3031 serge 4507
	I915_WRITE(DPLL(pipe), dpll);
2327 Serge 4508
 
3031 serge 4509
	/* Wait for the clocks to stabilize. */
4510
	POSTING_READ(DPLL(pipe));
4511
	udelay(150);
2327 Serge 4512
 
3031 serge 4513
	if (INTEL_INFO(dev)->gen >= 4) {
4514
		u32 temp = 0;
4515
		if (is_sdvo) {
4516
			temp = intel_mode_get_pixel_multiplier(adjusted_mode);
4517
			if (temp > 1)
4518
				temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
4519
			else
4520
				temp = 0;
4521
		}
4522
		I915_WRITE(DPLL_MD(pipe), temp);
4523
	} else {
4524
		/* The pixel multiplier can only be updated once the
4525
		 * DPLL is enabled and the clocks are stable.
4526
		 *
4527
		 * So write it again.
4528
		 */
4529
		I915_WRITE(DPLL(pipe), dpll);
4530
	}
4531
}
2327 Serge 4532
 
3031 serge 4533
static void i8xx_update_pll(struct drm_crtc *crtc,
4534
			    struct drm_display_mode *adjusted_mode,
3243 Serge 4535
			    intel_clock_t *clock, intel_clock_t *reduced_clock,
3031 serge 4536
			    int num_connectors)
4537
{
4538
	struct drm_device *dev = crtc->dev;
4539
	struct drm_i915_private *dev_priv = dev->dev_private;
4540
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4541
	int pipe = intel_crtc->pipe;
4542
	u32 dpll;
2327 Serge 4543
 
3243 Serge 4544
	i9xx_update_pll_dividers(crtc, clock, reduced_clock);
4545
 
3031 serge 4546
	dpll = DPLL_VGA_MODE_DIS;
2327 Serge 4547
 
3031 serge 4548
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
4549
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4550
	} else {
4551
		if (clock->p1 == 2)
4552
			dpll |= PLL_P1_DIVIDE_BY_TWO;
4553
		else
4554
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4555
		if (clock->p2 == 4)
4556
			dpll |= PLL_P2_DIVIDE_BY_4;
4557
	}
2327 Serge 4558
 
3031 serge 4559
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
4560
		/* XXX: just matching BIOS for now */
4561
		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
4562
		dpll |= 3;
4563
	else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4564
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4565
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4566
	else
4567
		dpll |= PLL_REF_INPUT_DREFCLK;
4568
 
4569
	dpll |= DPLL_VCO_ENABLE;
4570
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
4571
	POSTING_READ(DPLL(pipe));
4572
	udelay(150);
4573
 
4574
	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
4575
	 * This is an exception to the general rule that mode_set doesn't turn
4576
	 * things on.
4577
	 */
4578
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
4579
		intel_update_lvds(crtc, clock, adjusted_mode);
4580
 
4581
	I915_WRITE(DPLL(pipe), dpll);
4582
 
4583
	/* Wait for the clocks to stabilize. */
4584
	POSTING_READ(DPLL(pipe));
4585
	udelay(150);
4586
 
4587
	/* The pixel multiplier can only be updated once the
4588
	 * DPLL is enabled and the clocks are stable.
4589
	 *
4590
	 * So write it again.
4591
	 */
4592
	I915_WRITE(DPLL(pipe), dpll);
4593
}
4594
 
3243 Serge 4595
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc,
4596
				   struct drm_display_mode *mode,
4597
				   struct drm_display_mode *adjusted_mode)
4598
{
4599
	struct drm_device *dev = intel_crtc->base.dev;
4600
	struct drm_i915_private *dev_priv = dev->dev_private;
4601
	enum pipe pipe = intel_crtc->pipe;
4602
	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
4603
	uint32_t vsyncshift;
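	/* All of the h/v timing registers below are programmed with
	 * "value - 1" semantics; for interlaced modes the vertical totals
	 * are additionally trimmed by one because the hardware adds the two
	 * half-lines itself.
	 */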
4604
 
4605
	if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
4606
		/* the chip adds 2 halflines automatically */
4607
		adjusted_mode->crtc_vtotal -= 1;
4608
		adjusted_mode->crtc_vblank_end -= 1;
4609
		vsyncshift = adjusted_mode->crtc_hsync_start
4610
			     - adjusted_mode->crtc_htotal / 2;
4611
	} else {
4612
		vsyncshift = 0;
4613
	}
4614
 
4615
	if (INTEL_INFO(dev)->gen > 3)
4616
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
4617
 
4618
	I915_WRITE(HTOTAL(cpu_transcoder),
4619
		   (adjusted_mode->crtc_hdisplay - 1) |
4620
		   ((adjusted_mode->crtc_htotal - 1) << 16));
4621
	I915_WRITE(HBLANK(cpu_transcoder),
4622
		   (adjusted_mode->crtc_hblank_start - 1) |
4623
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
4624
	I915_WRITE(HSYNC(cpu_transcoder),
4625
		   (adjusted_mode->crtc_hsync_start - 1) |
4626
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
4627
 
4628
	I915_WRITE(VTOTAL(cpu_transcoder),
4629
		   (adjusted_mode->crtc_vdisplay - 1) |
4630
		   ((adjusted_mode->crtc_vtotal - 1) << 16));
4631
	I915_WRITE(VBLANK(cpu_transcoder),
4632
		   (adjusted_mode->crtc_vblank_start - 1) |
4633
		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
4634
	I915_WRITE(VSYNC(cpu_transcoder),
4635
		   (adjusted_mode->crtc_vsync_start - 1) |
4636
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
4637
 
4638
	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
4639
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
4640
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
4641
	 * bits. */
4642
	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
4643
	    (pipe == PIPE_B || pipe == PIPE_C))
4644
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
4645
 
4646
	/* pipesrc controls the size that is scaled from, which should
4647
	 * always be the user's requested size.
4648
	 */
4649
	I915_WRITE(PIPESRC(pipe),
4650
		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
4651
}
4652
 
3031 serge 4653
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4654
			      struct drm_display_mode *mode,
4655
			      struct drm_display_mode *adjusted_mode,
4656
			      int x, int y,
4657
			      struct drm_framebuffer *fb)
4658
{
4659
	struct drm_device *dev = crtc->dev;
4660
	struct drm_i915_private *dev_priv = dev->dev_private;
4661
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4662
	int pipe = intel_crtc->pipe;
4663
	int plane = intel_crtc->plane;
4664
	int refclk, num_connectors = 0;
4665
	intel_clock_t clock, reduced_clock;
3243 Serge 4666
	u32 dspcntr, pipeconf;
3031 serge 4667
	bool ok, has_reduced_clock = false, is_sdvo = false;
4668
	bool is_lvds = false, is_tv = false, is_dp = false;
4669
	struct intel_encoder *encoder;
4670
	const intel_limit_t *limit;
4671
	int ret;
4672
 
4673
	for_each_encoder_on_crtc(dev, crtc, encoder) {
4674
		switch (encoder->type) {
4675
		case INTEL_OUTPUT_LVDS:
4676
			is_lvds = true;
4677
			break;
4678
		case INTEL_OUTPUT_SDVO:
4679
		case INTEL_OUTPUT_HDMI:
4680
			is_sdvo = true;
4681
			if (encoder->needs_tv_clock)
4682
				is_tv = true;
4683
			break;
4684
		case INTEL_OUTPUT_TVOUT:
4685
			is_tv = true;
4686
			break;
4687
		case INTEL_OUTPUT_DISPLAYPORT:
4688
			is_dp = true;
4689
			break;
4690
		}
4691
 
4692
		num_connectors++;
4693
	}
4694
 
4695
	refclk = i9xx_get_refclk(crtc, num_connectors);
4696
 
4697
	/*
4698
	 * Returns a set of divisors for the desired target clock with the given
4699
	 * refclk, or FALSE.  The returned values represent the clock equation:
4700
	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
4701
	 */
4702
	limit = intel_limit(crtc, refclk);
4703
	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
4704
			     &clock);
4705
	if (!ok) {
4706
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
4707
		return -EINVAL;
4708
	}
4709
 
4710
	/* Ensure that the cursor is valid for the new mode before changing... */
4711
//   intel_crtc_update_cursor(crtc, true);
4712
 
4713
	if (is_lvds && dev_priv->lvds_downclock_avail) {
4714
		/*
4715
		 * Ensure we match the reduced clock's P to the target clock.
4716
		 * If the clocks don't match, we can't switch the display clock
4717
		 * by using the FP0/FP1. In such case we will disable the LVDS
4718
		 * downclock feature.
4719
		*/
4720
		has_reduced_clock = limit->find_pll(limit, crtc,
4721
						    dev_priv->lvds_downclock,
4722
						    refclk,
4723
						    &clock,
4724
						    &reduced_clock);
4725
	}
4726
 
4727
	if (is_sdvo && is_tv)
4728
		i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
4729
 
4730
	if (IS_GEN2(dev))
3243 Serge 4731
		i8xx_update_pll(crtc, adjusted_mode, &clock,
4732
				has_reduced_clock ? &reduced_clock : NULL,
4733
				num_connectors);
3031 serge 4734
	else if (IS_VALLEYVIEW(dev))
3243 Serge 4735
		vlv_update_pll(crtc, mode, adjusted_mode, &clock,
4736
				has_reduced_clock ? &reduced_clock : NULL,
4737
				num_connectors);
3031 serge 4738
	else
4739
		i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
4740
				has_reduced_clock ? &reduced_clock : NULL,
4741
				num_connectors);
4742
 
4743
	/* setup pipeconf */
4744
	pipeconf = I915_READ(PIPECONF(pipe));
4745
 
4746
	/* Set up the display plane register */
4747
	dspcntr = DISPPLANE_GAMMA_ENABLE;
4748
 
4749
	if (pipe == 0)
4750
		dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
4751
	else
4752
		dspcntr |= DISPPLANE_SEL_PIPE_B;
4753
 
4754
	if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
4755
		/* Enable pixel doubling when the dot clock is > 90% of the (display)
4756
		 * core speed.
4757
		 *
4758
		 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
4759
		 * pipe == 0 check?
4760
		 */
4761
		if (mode->clock >
4762
		    dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
4763
			pipeconf |= PIPECONF_DOUBLE_WIDE;
4764
		else
4765
			pipeconf &= ~PIPECONF_DOUBLE_WIDE;
4766
		}
4767
 
4768
	/* default to 8bpc */
4769
	pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
4770
	if (is_dp) {
4771
		if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
4772
			pipeconf |= PIPECONF_BPP_6 |
4773
				    PIPECONF_DITHER_EN |
4774
				    PIPECONF_DITHER_TYPE_SP;
4775
			}
4776
		}
4777
 
3243 Serge 4778
	if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
4779
		if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
4780
			pipeconf |= PIPECONF_BPP_6 |
4781
					PIPECONF_ENABLE |
4782
					I965_PIPECONF_ACTIVE;
4783
		}
4784
	}
4785
 
3031 serge 4786
	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
4787
	drm_mode_debug_printmodeline(mode);
4788
 
4789
	if (HAS_PIPE_CXSR(dev)) {
4790
		if (intel_crtc->lowfreq_avail) {
4791
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
4792
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
4793
		} else {
4794
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
4795
			pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
4796
			}
4797
		}
4798
 
2360 Serge 4799
	pipeconf &= ~PIPECONF_INTERLACE_MASK;
3031 serge 4800
	if (!IS_GEN2(dev) &&
3243 Serge 4801
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
3031 serge 4802
		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
3243 Serge 4803
	else
2360 Serge 4804
		pipeconf |= PIPECONF_PROGRESSIVE;
2327 Serge 4805
 
3243 Serge 4806
	intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
2327 Serge 4807
 
3031 serge 4808
	/* pipesrc and dspsize control the size that is scaled from,
4809
	 * which should always be the user's requested size.
4810
	 */
4811
	I915_WRITE(DSPSIZE(plane),
4812
		   ((mode->vdisplay - 1) << 16) |
4813
		   (mode->hdisplay - 1));
4814
	I915_WRITE(DSPPOS(plane), 0);
2327 Serge 4815
 
3031 serge 4816
	I915_WRITE(PIPECONF(pipe), pipeconf);
4817
	POSTING_READ(PIPECONF(pipe));
4818
	intel_enable_pipe(dev_priv, pipe, false);
2327 Serge 4819
 
3031 serge 4820
	intel_wait_for_vblank(dev, pipe);
2327 Serge 4821
 
3031 serge 4822
	I915_WRITE(DSPCNTR(plane), dspcntr);
4823
	POSTING_READ(DSPCNTR(plane));
2327 Serge 4824
 
3031 serge 4825
	ret = intel_pipe_set_base(crtc, x, y, fb);
2327 Serge 4826
 
3031 serge 4827
	intel_update_watermarks(dev);
4828
 
2327 Serge 4829
    return ret;
4830
}
4831
 
3243 Serge 4832
static void ironlake_init_pch_refclk(struct drm_device *dev)
2327 Serge 4833
{
4834
	struct drm_i915_private *dev_priv = dev->dev_private;
4835
	struct drm_mode_config *mode_config = &dev->mode_config;
4836
	struct intel_encoder *encoder;
4837
	u32 temp;
4838
	bool has_lvds = false;
2342 Serge 4839
	bool has_cpu_edp = false;
4840
	bool has_pch_edp = false;
4841
	bool has_panel = false;
4842
	bool has_ck505 = false;
4843
	bool can_ssc = false;
2327 Serge 4844
 
4845
	/* We need to take the global config into account */
4846
		list_for_each_entry(encoder, &mode_config->encoder_list,
4847
				    base.head) {
4848
			switch (encoder->type) {
4849
			case INTEL_OUTPUT_LVDS:
2342 Serge 4850
			has_panel = true;
2327 Serge 4851
				has_lvds = true;
2342 Serge 4852
			break;
2327 Serge 4853
			case INTEL_OUTPUT_EDP:
2342 Serge 4854
			has_panel = true;
4855
			if (intel_encoder_is_pch_edp(&encoder->base))
4856
				has_pch_edp = true;
4857
			else
4858
				has_cpu_edp = true;
2327 Serge 4859
				break;
4860
			}
4861
		}
2342 Serge 4862
 
4863
	if (HAS_PCH_IBX(dev)) {
4864
		has_ck505 = dev_priv->display_clock_mode;
4865
		can_ssc = has_ck505;
4866
	} else {
4867
		has_ck505 = false;
4868
		can_ssc = true;
2327 Serge 4869
	}
4870
 
2342 Serge 4871
	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
4872
		      has_panel, has_lvds, has_pch_edp, has_cpu_edp,
4873
		      has_ck505);
4874
 
2327 Serge 4875
	/* Ironlake: try to setup display ref clock before DPLL
4876
	 * enabling. This is only under driver's control after
4877
	 * PCH B stepping, previous chipset stepping should be
4878
	 * ignoring this setting.
4879
	 */
4880
	temp = I915_READ(PCH_DREF_CONTROL);
4881
	/* Always enable nonspread source */
4882
	temp &= ~DREF_NONSPREAD_SOURCE_MASK;
2342 Serge 4883
 
4884
	if (has_ck505)
4885
		temp |= DREF_NONSPREAD_CK505_ENABLE;
4886
	else
2327 Serge 4887
		temp |= DREF_NONSPREAD_SOURCE_ENABLE;
2342 Serge 4888
 
4889
	if (has_panel) {
2327 Serge 4890
	temp &= ~DREF_SSC_SOURCE_MASK;
4891
	temp |= DREF_SSC_SOURCE_ENABLE;
4892
 
2342 Serge 4893
		/* SSC must be turned on before enabling the CPU output  */
4894
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
4895
			DRM_DEBUG_KMS("Using SSC on panel\n");
4896
			temp |= DREF_SSC1_ENABLE;
3031 serge 4897
		} else
4898
			temp &= ~DREF_SSC1_ENABLE;
2327 Serge 4899
 
2342 Serge 4900
		/* Get SSC going before enabling the outputs */
2327 Serge 4901
			I915_WRITE(PCH_DREF_CONTROL, temp);
4902
			POSTING_READ(PCH_DREF_CONTROL);
4903
			udelay(200);
2342 Serge 4904
 
2327 Serge 4905
		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4906
 
4907
		/* Enable CPU source on CPU attached eDP */
2342 Serge 4908
		if (has_cpu_edp) {
4909
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
4910
				DRM_DEBUG_KMS("Using SSC on eDP\n");
2327 Serge 4911
				temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
2342 Serge 4912
			}
2327 Serge 4913
			else
4914
				temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
2342 Serge 4915
		} else
4916
			temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
4917
 
4918
		I915_WRITE(PCH_DREF_CONTROL, temp);
4919
		POSTING_READ(PCH_DREF_CONTROL);
4920
		udelay(200);
2327 Serge 4921
		} else {
2342 Serge 4922
		DRM_DEBUG_KMS("Disabling SSC entirely\n");
4923
 
4924
		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4925
 
4926
		/* Turn off CPU output */
4927
		temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
4928
 
2327 Serge 4929
		I915_WRITE(PCH_DREF_CONTROL, temp);
4930
		POSTING_READ(PCH_DREF_CONTROL);
4931
		udelay(200);
2342 Serge 4932
 
4933
		/* Turn off the SSC source */
4934
		temp &= ~DREF_SSC_SOURCE_MASK;
4935
		temp |= DREF_SSC_SOURCE_DISABLE;
4936
 
4937
		/* Turn off SSC1 */
4938
		temp &= ~DREF_SSC1_ENABLE;
4939
 
4940
		I915_WRITE(PCH_DREF_CONTROL, temp);
4941
		POSTING_READ(PCH_DREF_CONTROL);
4942
		udelay(200);
2327 Serge 4943
	}
4944
}
4945
 
3243 Serge 4946
/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
4947
static void lpt_init_pch_refclk(struct drm_device *dev)
4948
{
4949
	struct drm_i915_private *dev_priv = dev->dev_private;
4950
	struct drm_mode_config *mode_config = &dev->mode_config;
4951
	struct intel_encoder *encoder;
4952
	bool has_vga = false;
4953
	bool is_sdv = false;
4954
	u32 tmp;
4955
 
4956
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4957
		switch (encoder->type) {
4958
		case INTEL_OUTPUT_ANALOG:
4959
			has_vga = true;
4960
			break;
4961
		}
4962
	}
4963
 
4964
	if (!has_vga)
4965
		return;
4966
 
4967
	/* XXX: Rip out SDV support once Haswell ships for real. */
4968
	if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
4969
		is_sdv = true;
4970
 
4971
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
4972
	tmp &= ~SBI_SSCCTL_DISABLE;
4973
	tmp |= SBI_SSCCTL_PATHALT;
4974
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
4975
 
4976
	udelay(24);
4977
 
4978
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
4979
	tmp &= ~SBI_SSCCTL_PATHALT;
4980
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
4981
 
4982
	if (!is_sdv) {
4983
		tmp = I915_READ(SOUTH_CHICKEN2);
4984
		tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
4985
		I915_WRITE(SOUTH_CHICKEN2, tmp);
4986
 
4987
		if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
4988
				       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
4989
			DRM_ERROR("FDI mPHY reset assert timeout\n");
4990
 
4991
		tmp = I915_READ(SOUTH_CHICKEN2);
4992
		tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
4993
		I915_WRITE(SOUTH_CHICKEN2, tmp);
4994
 
4995
		if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
4996
				        FDI_MPHY_IOSFSB_RESET_STATUS) == 0,
4997
				       100))
4998
			DRM_ERROR("FDI mPHY reset de-assert timeout\n");
4999
	}
5000
 
5001
	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
5002
	tmp &= ~(0xFF << 24);
5003
	tmp |= (0x12 << 24);
5004
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
5005
 
5006
	if (!is_sdv) {
5007
		tmp = intel_sbi_read(dev_priv, 0x808C, SBI_MPHY);
5008
		tmp &= ~(0x3 << 6);
5009
		tmp |= (1 << 6) | (1 << 0);
5010
		intel_sbi_write(dev_priv, 0x808C, tmp, SBI_MPHY);
5011
	}
5012
 
5013
	if (is_sdv) {
5014
		tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
5015
		tmp |= 0x7FFF;
5016
		intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
5017
	}
5018
 
5019
	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
5020
	tmp |= (1 << 11);
5021
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
5022
 
5023
	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
5024
	tmp |= (1 << 11);
5025
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
5026
 
5027
	if (is_sdv) {
5028
		tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
5029
		tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
5030
		intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);
5031
 
5032
		tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
5033
		tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
5034
		intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);
5035
 
5036
		tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
5037
		tmp |= (0x3F << 8);
5038
		intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);
5039
 
5040
		tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
5041
		tmp |= (0x3F << 8);
5042
		intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
5043
	}
5044
 
5045
	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
5046
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5047
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
5048
 
5049
	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
5050
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5051
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
5052
 
5053
	if (!is_sdv) {
5054
		tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
5055
		tmp &= ~(7 << 13);
5056
		tmp |= (5 << 13);
5057
		intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
5058
 
5059
		tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
5060
		tmp &= ~(7 << 13);
5061
		tmp |= (5 << 13);
5062
		intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
5063
	}
5064
 
5065
	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
5066
	tmp &= ~0xFF;
5067
	tmp |= 0x1C;
5068
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
5069
 
5070
	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
5071
	tmp &= ~0xFF;
5072
	tmp |= 0x1C;
5073
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
5074
 
5075
	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
5076
	tmp &= ~(0xFF << 16);
5077
	tmp |= (0x1C << 16);
5078
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
5079
 
5080
	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
5081
	tmp &= ~(0xFF << 16);
5082
	tmp |= (0x1C << 16);
5083
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
5084
 
5085
	if (!is_sdv) {
5086
		tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
5087
		tmp |= (1 << 27);
5088
		intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
5089
 
5090
		tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
5091
		tmp |= (1 << 27);
5092
		intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
5093
 
5094
		tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
5095
		tmp &= ~(0xF << 28);
5096
		tmp |= (4 << 28);
5097
		intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
5098
 
5099
		tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
5100
		tmp &= ~(0xF << 28);
5101
		tmp |= (4 << 28);
5102
		intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
5103
	}
5104
 
5105
	/* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
5106
	tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
5107
	tmp |= SBI_DBUFF0_ENABLE;
5108
	intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
5109
}
5110
 
5111
/*
5112
 * Initialize reference clocks when the driver loads
5113
 */
5114
void intel_init_pch_refclk(struct drm_device *dev)
5115
{
5116
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
5117
		ironlake_init_pch_refclk(dev);
5118
	else if (HAS_PCH_LPT(dev))
5119
		lpt_init_pch_refclk(dev);
5120
}
5121
 
2342 Serge 5122
static int ironlake_get_refclk(struct drm_crtc *crtc)
5123
{
5124
	struct drm_device *dev = crtc->dev;
5125
	struct drm_i915_private *dev_priv = dev->dev_private;
5126
	struct intel_encoder *encoder;
5127
	struct intel_encoder *edp_encoder = NULL;
5128
	int num_connectors = 0;
5129
	bool is_lvds = false;
5130
 
3031 serge 5131
	for_each_encoder_on_crtc(dev, crtc, encoder) {
2342 Serge 5132
		switch (encoder->type) {
5133
		case INTEL_OUTPUT_LVDS:
5134
			is_lvds = true;
5135
			break;
5136
		case INTEL_OUTPUT_EDP:
5137
			edp_encoder = encoder;
5138
			break;
5139
		}
5140
		num_connectors++;
5141
	}
5142
 
5143
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5144
		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
5145
			      dev_priv->lvds_ssc_freq);
5146
		return dev_priv->lvds_ssc_freq * 1000;
5147
	}
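	/* Note added for clarity: the reference clock is returned in kHz, so
	 * the SSC case above yields lvds_ssc_freq (MHz) * 1000 and the default
	 * below is the 120 MHz PCH reference. */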
5148
 
5149
	return 120000;
5150
}
5151
 
3031 serge 5152
static void ironlake_set_pipeconf(struct drm_crtc *crtc,
5153
				  struct drm_display_mode *adjusted_mode,
5154
				  bool dither)
5155
{
5156
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
5157
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5158
	int pipe = intel_crtc->pipe;
5159
	uint32_t val;
5160
 
5161
	val = I915_READ(PIPECONF(pipe));
5162
 
5163
	val &= ~PIPE_BPC_MASK;
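	/* Note added for clarity: intel_crtc->bpp is the total pipe depth
	 * across all three channels, so the 18/24/30/36 cases below correspond
	 * to 6/8/10/12 bits per component. */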
5164
	switch (intel_crtc->bpp) {
5165
	case 18:
5166
		val |= PIPE_6BPC;
5167
		break;
5168
	case 24:
5169
		val |= PIPE_8BPC;
5170
		break;
5171
	case 30:
5172
		val |= PIPE_10BPC;
5173
		break;
5174
	case 36:
5175
		val |= PIPE_12BPC;
5176
		break;
5177
	default:
3243 Serge 5178
		/* Case prevented by intel_choose_pipe_bpp_dither. */
5179
		BUG();
3031 serge 5180
	}
5181
 
5182
	val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
5183
	if (dither)
5184
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5185
 
5186
	val &= ~PIPECONF_INTERLACE_MASK;
5187
	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
5188
		val |= PIPECONF_INTERLACED_ILK;
5189
	else
5190
		val |= PIPECONF_PROGRESSIVE;
5191
 
5192
	I915_WRITE(PIPECONF(pipe), val);
5193
	POSTING_READ(PIPECONF(pipe));
5194
}
5195
 
3243 Serge 5196
static void haswell_set_pipeconf(struct drm_crtc *crtc,
5197
				 struct drm_display_mode *adjusted_mode,
5198
				 bool dither)
5199
{
5200
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
5201
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5202
	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
5203
	uint32_t val;
5204
 
5205
	val = I915_READ(PIPECONF(cpu_transcoder));
5206
 
5207
	val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
5208
	if (dither)
5209
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5210
 
5211
	val &= ~PIPECONF_INTERLACE_MASK_HSW;
5212
	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
5213
		val |= PIPECONF_INTERLACED_ILK;
5214
	else
5215
		val |= PIPECONF_PROGRESSIVE;
5216
 
5217
	I915_WRITE(PIPECONF(cpu_transcoder), val);
5218
	POSTING_READ(PIPECONF(cpu_transcoder));
5219
}
5220
 
3031 serge 5221
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
5222
				    struct drm_display_mode *adjusted_mode,
5223
				    intel_clock_t *clock,
5224
				    bool *has_reduced_clock,
5225
				    intel_clock_t *reduced_clock)
5226
{
5227
	struct drm_device *dev = crtc->dev;
5228
	struct drm_i915_private *dev_priv = dev->dev_private;
5229
	struct intel_encoder *intel_encoder;
5230
	int refclk;
5231
	const intel_limit_t *limit;
5232
	bool ret, is_sdvo = false, is_tv = false, is_lvds = false;
5233
 
5234
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5235
		switch (intel_encoder->type) {
5236
		case INTEL_OUTPUT_LVDS:
5237
			is_lvds = true;
5238
			break;
5239
		case INTEL_OUTPUT_SDVO:
5240
		case INTEL_OUTPUT_HDMI:
5241
			is_sdvo = true;
5242
			if (intel_encoder->needs_tv_clock)
5243
				is_tv = true;
5244
			break;
5245
		case INTEL_OUTPUT_TVOUT:
5246
			is_tv = true;
5247
			break;
5248
		}
5249
	}
5250
 
5251
	refclk = ironlake_get_refclk(crtc);
5252
 
5253
	/*
5254
	 * Returns a set of divisors for the desired target clock with the given
5255
	 * refclk, or FALSE.  The returned values represent the clock equation:
5256
	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
5257
	 */
5258
	limit = intel_limit(crtc, refclk);
5259
	ret = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
5260
			      clock);
5261
	if (!ret)
5262
		return false;
5263
 
5264
	if (is_lvds && dev_priv->lvds_downclock_avail) {
5265
		/*
5266
		 * Ensure we match the reduced clock's P to the target clock.
5267
		 * If the clocks don't match, we can't switch the display clock
5268
		 * by using the FP0/FP1. In that case we will disable the LVDS
5269
		 * downclock feature.
5270
		*/
5271
		*has_reduced_clock = limit->find_pll(limit, crtc,
5272
						     dev_priv->lvds_downclock,
5273
						     refclk,
5274
						     clock,
5275
						     reduced_clock);
5276
	}
5277
 
5278
	if (is_sdvo && is_tv)
5279
		i9xx_adjust_sdvo_tv_clock(adjusted_mode, clock);
5280
 
5281
	return true;
5282
}
5283
 
3243 Serge 5284
static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
5285
{
5286
	struct drm_i915_private *dev_priv = dev->dev_private;
5287
	uint32_t temp;
5288
 
5289
	temp = I915_READ(SOUTH_CHICKEN1);
5290
	if (temp & FDI_BC_BIFURCATION_SELECT)
5291
		return;
5292
 
5293
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
5294
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
5295
 
5296
	temp |= FDI_BC_BIFURCATION_SELECT;
5297
	DRM_DEBUG_KMS("enabling fdi C rx\n");
5298
	I915_WRITE(SOUTH_CHICKEN1, temp);
5299
	POSTING_READ(SOUTH_CHICKEN1);
5300
}
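/* Note added for clarity (summary, not text from the original source): on
 * these PCHs the FDI links for pipes B and C share a set of lanes; the
 * bifurcation bit programmed above splits FDI B's four lanes into 2 + 2 so
 * that pipe C can get a link, which is what the checks below enforce. */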
5301
 
5302
static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
5303
{
5304
	struct drm_device *dev = intel_crtc->base.dev;
5305
	struct drm_i915_private *dev_priv = dev->dev_private;
5306
	struct intel_crtc *pipe_B_crtc =
5307
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
5308
 
5309
	DRM_DEBUG_KMS("checking fdi config on pipe %i, lanes %i\n",
5310
		      intel_crtc->pipe, intel_crtc->fdi_lanes);
5311
	if (intel_crtc->fdi_lanes > 4) {
5312
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %i: %i lanes\n",
5313
			      intel_crtc->pipe, intel_crtc->fdi_lanes);
5314
		/* Clamp lanes to avoid programming the hw with bogus values. */
5315
		intel_crtc->fdi_lanes = 4;
5316
 
5317
		return false;
5318
	}
5319
 
5320
	if (dev_priv->num_pipe == 2)
5321
		return true;
5322
 
5323
	switch (intel_crtc->pipe) {
5324
	case PIPE_A:
5325
		return true;
5326
	case PIPE_B:
5327
		if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
5328
		    intel_crtc->fdi_lanes > 2) {
5329
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
5330
				      intel_crtc->pipe, intel_crtc->fdi_lanes);
5331
			/* Clamp lanes to avoid programming the hw with bogus values. */
5332
			intel_crtc->fdi_lanes = 2;
5333
 
5334
			return false;
5335
		}
5336
 
5337
		if (intel_crtc->fdi_lanes > 2)
5338
			WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
5339
		else
5340
			cpt_enable_fdi_bc_bifurcation(dev);
5341
 
5342
		return true;
5343
	case PIPE_C:
5344
		if (!pipe_B_crtc->base.enabled || pipe_B_crtc->fdi_lanes <= 2) {
5345
			if (intel_crtc->fdi_lanes > 2) {
5346
				DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
5347
					      intel_crtc->pipe, intel_crtc->fdi_lanes);
5348
				/* Clamp lanes to avoid programming the hw with bogus values. */
5349
				intel_crtc->fdi_lanes = 2;
5350
 
5351
				return false;
5352
			}
5353
		} else {
5354
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
5355
			return false;
5356
		}
5357
 
5358
		cpt_enable_fdi_bc_bifurcation(dev);
5359
 
5360
		return true;
5361
	default:
5362
		BUG();
5363
	}
5364
}
5365
 
5366
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
5367
{
5368
	/*
5369
	 * Account for spread spectrum to avoid
5370
	 * oversubscribing the link. Max center spread
5371
	 * is 2.5%; use 5% for safety's sake.
5372
	 */
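	/*
	 * Illustrative arithmetic (example values, not taken from this file):
	 * a 148,500 kHz mode at 24 bpp on a 270,000 unit FDI link gives
	 * bps = 148500 * 24 * 21 / 20 = 3,742,200 and
	 * 3,742,200 / (270000 * 8) + 1 = 2 lanes.
	 */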
5373
	u32 bps = target_clock * bpp * 21 / 20;
5374
	return bps / (link_bw * 8) + 1;
5375
}
5376
 
5377
static void ironlake_set_m_n(struct drm_crtc *crtc,
2327 Serge 5378
                  struct drm_display_mode *mode,
3243 Serge 5379
			     struct drm_display_mode *adjusted_mode)
2327 Serge 5380
{
5381
    struct drm_device *dev = crtc->dev;
5382
    struct drm_i915_private *dev_priv = dev->dev_private;
5383
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3243 Serge 5384
	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
5385
	struct intel_encoder *intel_encoder, *edp_encoder = NULL;
2327 Serge 5386
    struct fdi_m_n m_n = {0};
3243 Serge 5387
	int target_clock, pixel_multiplier, lane, link_bw;
5388
	bool is_dp = false, is_cpu_edp = false;
2327 Serge 5389
 
3243 Serge 5390
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5391
		switch (intel_encoder->type) {
2327 Serge 5392
        case INTEL_OUTPUT_DISPLAYPORT:
5393
            is_dp = true;
5394
            break;
5395
        case INTEL_OUTPUT_EDP:
3031 serge 5396
			is_dp = true;
3243 Serge 5397
			if (!intel_encoder_is_pch_edp(&intel_encoder->base))
3031 serge 5398
				is_cpu_edp = true;
3243 Serge 5399
			edp_encoder = intel_encoder;
2327 Serge 5400
            break;
5401
        }
5402
    }
5403
 
5404
    /* FDI link */
5405
    pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5406
    lane = 0;
5407
    /* CPU eDP doesn't require FDI link, so just set DP M/N
5408
       according to current link config */
3031 serge 5409
	if (is_cpu_edp) {
5410
		intel_edp_link_config(edp_encoder, &lane, &link_bw);
2327 Serge 5411
    } else {
5412
        /* FDI is a binary signal running at ~2.7GHz, encoding
5413
         * each output octet as 10 bits. The actual frequency
5414
         * is stored as a divider into a 100MHz clock, and the
5415
         * mode pixel clock is stored in units of 1KHz.
5416
         * Hence the bw of each lane in terms of the mode signal
5417
         * is:
5418
         */
5419
        link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
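        /* Illustrative: if intel_fdi_link_freq() reports 27 (a 2.7 GHz link),
         * link_bw = 27 * 100000 / 10 = 270000, i.e. 270,000 kilo-octets per
         * second per lane in the same 1 kHz units as the mode clock (example
         * values, not taken from this file). */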
5420
    }
5421
 
3031 serge 5422
	/* [e]DP over FDI requires target mode clock instead of link clock. */
5423
	if (edp_encoder)
5424
		target_clock = intel_edp_target_clock(edp_encoder, mode);
5425
	else if (is_dp)
5426
		target_clock = mode->clock;
5427
	else
5428
		target_clock = adjusted_mode->clock;
5429
 
3243 Serge 5430
	if (!lane)
5431
		lane = ironlake_get_lanes_required(target_clock, link_bw,
5432
						   intel_crtc->bpp);
3031 serge 5433
 
2327 Serge 5434
    intel_crtc->fdi_lanes = lane;
5435
 
5436
    if (pixel_multiplier > 1)
5437
        link_bw *= pixel_multiplier;
5438
    ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
5439
                 &m_n);
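	/* Summary added for clarity (based on the usual FDI/DP M/N definition,
	 * not text from this file): the data M/N pair encodes roughly
	 * (bpp * target_clock) / (8 * link_bw * lane) and the link M/N pair
	 * roughly target_clock / link_bw; the hardware uses these ratios to
	 * pace the pixel stream across the link. */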
5440
 
3243 Serge 5441
	I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m);
5442
	I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
5443
	I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
5444
	I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
5445
}
2327 Serge 5446
 
3243 Serge 5447
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
5448
				      struct drm_display_mode *adjusted_mode,
5449
				      intel_clock_t *clock, u32 fp)
5450
{
5451
	struct drm_crtc *crtc = &intel_crtc->base;
5452
	struct drm_device *dev = crtc->dev;
5453
	struct drm_i915_private *dev_priv = dev->dev_private;
5454
	struct intel_encoder *intel_encoder;
5455
	uint32_t dpll;
5456
	int factor, pixel_multiplier, num_connectors = 0;
5457
	bool is_lvds = false, is_sdvo = false, is_tv = false;
5458
	bool is_dp = false, is_cpu_edp = false;
5459
 
5460
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5461
		switch (intel_encoder->type) {
5462
		case INTEL_OUTPUT_LVDS:
5463
			is_lvds = true;
5464
			break;
5465
		case INTEL_OUTPUT_SDVO:
5466
		case INTEL_OUTPUT_HDMI:
5467
			is_sdvo = true;
5468
			if (intel_encoder->needs_tv_clock)
5469
				is_tv = true;
5470
			break;
5471
		case INTEL_OUTPUT_TVOUT:
5472
			is_tv = true;
5473
			break;
5474
		case INTEL_OUTPUT_DISPLAYPORT:
5475
			is_dp = true;
5476
			break;
5477
		case INTEL_OUTPUT_EDP:
5478
			is_dp = true;
5479
			if (!intel_encoder_is_pch_edp(&intel_encoder->base))
5480
				is_cpu_edp = true;
5481
			break;
5482
		}
5483
 
5484
		num_connectors++;
5485
	}
5486
 
2327 Serge 5487
    /* Enable autotuning of the PLL clock (if permissible) */
5488
    factor = 21;
5489
    if (is_lvds) {
5490
        if ((intel_panel_use_ssc(dev_priv) &&
5491
             dev_priv->lvds_ssc_freq == 100) ||
5492
            (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
5493
            factor = 25;
5494
    } else if (is_sdvo && is_tv)
5495
        factor = 20;
5496
 
3243 Serge 5497
	if (clock->m < factor * clock->n)
2327 Serge 5498
        fp |= FP_CB_TUNE;
5499
 
5500
    dpll = 0;
5501
 
5502
    if (is_lvds)
5503
        dpll |= DPLLB_MODE_LVDS;
5504
    else
5505
        dpll |= DPLLB_MODE_DAC_SERIAL;
5506
    if (is_sdvo) {
3243 Serge 5507
		pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
2327 Serge 5508
        if (pixel_multiplier > 1) {
5509
            dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
5510
        }
5511
        dpll |= DPLL_DVO_HIGH_SPEED;
5512
    }
3031 serge 5513
	if (is_dp && !is_cpu_edp)
2327 Serge 5514
        dpll |= DPLL_DVO_HIGH_SPEED;
5515
 
5516
    /* compute bitmask from p1 value */
3243 Serge 5517
	dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
2327 Serge 5518
    /* also FPA1 */
3243 Serge 5519
	dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
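	/* Example added for clarity: the P1 divider is one-hot encoded, so
	 * clock->p1 == 3 sets bit 2 in each of the FPA0/FPA1 fields above. */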
2327 Serge 5520
 
3243 Serge 5521
	switch (clock->p2) {
2327 Serge 5522
    case 5:
5523
        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5524
        break;
5525
    case 7:
5526
        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5527
        break;
5528
    case 10:
5529
        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5530
        break;
5531
    case 14:
5532
        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5533
        break;
5534
    }
5535
 
5536
    if (is_sdvo && is_tv)
5537
        dpll |= PLL_REF_INPUT_TVCLKINBC;
5538
    else if (is_tv)
5539
        /* XXX: just matching BIOS for now */
5540
        /*  dpll |= PLL_REF_INPUT_TVCLKINBC; */
5541
        dpll |= 3;
5542
    else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5543
        dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5544
    else
5545
        dpll |= PLL_REF_INPUT_DREFCLK;
5546
 
3243 Serge 5547
	return dpll;
5548
}
5549
 
5550
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5551
				  struct drm_display_mode *mode,
5552
				  struct drm_display_mode *adjusted_mode,
5553
				  int x, int y,
5554
				  struct drm_framebuffer *fb)
5555
{
5556
	struct drm_device *dev = crtc->dev;
5557
	struct drm_i915_private *dev_priv = dev->dev_private;
5558
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5559
	int pipe = intel_crtc->pipe;
5560
	int plane = intel_crtc->plane;
5561
	int num_connectors = 0;
5562
	intel_clock_t clock, reduced_clock;
5563
	u32 dpll, fp = 0, fp2 = 0;
5564
	bool ok, has_reduced_clock = false;
5565
	bool is_lvds = false, is_dp = false, is_cpu_edp = false;
5566
	struct intel_encoder *encoder;
5567
	u32 temp;
5568
	int ret;
5569
	bool dither, fdi_config_ok;
5570
 
5571
	for_each_encoder_on_crtc(dev, crtc, encoder) {
5572
		switch (encoder->type) {
5573
		case INTEL_OUTPUT_LVDS:
5574
			is_lvds = true;
5575
			break;
5576
		case INTEL_OUTPUT_DISPLAYPORT:
5577
			is_dp = true;
5578
			break;
5579
		case INTEL_OUTPUT_EDP:
5580
			is_dp = true;
5581
			if (!intel_encoder_is_pch_edp(&encoder->base))
5582
				is_cpu_edp = true;
5583
			break;
5584
		}
5585
 
5586
		num_connectors++;
5587
	}
5588
 
5589
	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
5590
	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
5591
 
5592
	ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
5593
				     &has_reduced_clock, &reduced_clock);
5594
	if (!ok) {
5595
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
5596
		return -EINVAL;
5597
	}
5598
 
5599
	/* Ensure that the cursor is valid for the new mode before changing... */
5600
//	intel_crtc_update_cursor(crtc, true);
5601
 
5602
	/* determine panel color depth */
5603
	dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
5604
					      adjusted_mode);
5605
	if (is_lvds && dev_priv->lvds_dither)
5606
		dither = true;
5607
 
5608
	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
5609
	if (has_reduced_clock)
5610
		fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
5611
			reduced_clock.m2;
5612
 
5613
	dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, fp);
5614
 
2342 Serge 5615
	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
2327 Serge 5616
    drm_mode_debug_printmodeline(mode);
5617
 
3243 Serge 5618
	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
5619
	if (!is_cpu_edp) {
3031 serge 5620
		struct intel_pch_pll *pll;
2327 Serge 5621
 
3031 serge 5622
		pll = intel_get_pch_pll(intel_crtc, dpll, fp);
5623
		if (pll == NULL) {
5624
			DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
5625
					 pipe);
2342 Serge 5626
			return -EINVAL;
2327 Serge 5627
        }
3031 serge 5628
	} else
5629
		intel_put_pch_pll(intel_crtc);
2327 Serge 5630
 
5631
    /* The LVDS pin pair needs to be on before the DPLLs are enabled.
5632
     * This is an exception to the general rule that mode_set doesn't turn
5633
     * things on.
5634
     */
5635
    if (is_lvds) {
5636
        temp = I915_READ(PCH_LVDS);
5637
        temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
2342 Serge 5638
		if (HAS_PCH_CPT(dev)) {
5639
			temp &= ~PORT_TRANS_SEL_MASK;
5640
			temp |= PORT_TRANS_SEL_CPT(pipe);
5641
		} else {
5642
			if (pipe == 1)
2327 Serge 5643
                temp |= LVDS_PIPEB_SELECT;
5644
            else
5645
                temp &= ~LVDS_PIPEB_SELECT;
5646
        }
2342 Serge 5647
 
2327 Serge 5648
        /* set the corresponding LVDS_BORDER bit */
5649
        temp |= dev_priv->lvds_border_bits;
5650
        /* Set the B0-B3 data pairs corresponding to whether we're going to
5651
         * set the DPLLs for dual-channel mode or not.
5652
         */
5653
        if (clock.p2 == 7)
5654
            temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
5655
        else
5656
            temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
5657
 
5658
        /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
5659
         * appropriately here, but we need to look more thoroughly into how
5660
         * panels behave in the two modes.
5661
         */
3031 serge 5662
		temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
2327 Serge 5663
        if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
3031 serge 5664
			temp |= LVDS_HSYNC_POLARITY;
2327 Serge 5665
        if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
3031 serge 5666
			temp |= LVDS_VSYNC_POLARITY;
2327 Serge 5667
        I915_WRITE(PCH_LVDS, temp);
5668
    }
5669
 
3031 serge 5670
	if (is_dp && !is_cpu_edp) {
2327 Serge 5671
        intel_dp_set_m_n(crtc, mode, adjusted_mode);
5672
    } else {
5673
        /* For non-DP output, clear any trans DP clock recovery setting.*/
5674
        I915_WRITE(TRANSDATA_M1(pipe), 0);
5675
        I915_WRITE(TRANSDATA_N1(pipe), 0);
5676
        I915_WRITE(TRANSDPLINK_M1(pipe), 0);
5677
        I915_WRITE(TRANSDPLINK_N1(pipe), 0);
5678
    }
5679
 
3031 serge 5680
	if (intel_crtc->pch_pll) {
5681
		I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
2327 Serge 5682
 
5683
        /* Wait for the clocks to stabilize. */
3031 serge 5684
		POSTING_READ(intel_crtc->pch_pll->pll_reg);
2327 Serge 5685
        udelay(150);
5686
 
5687
        /* The pixel multiplier can only be updated once the
5688
         * DPLL is enabled and the clocks are stable.
5689
         *
5690
         * So write it again.
5691
         */
3031 serge 5692
		I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
2327 Serge 5693
    }
5694
 
5695
    intel_crtc->lowfreq_avail = false;
3031 serge 5696
	if (intel_crtc->pch_pll) {
2327 Serge 5697
    if (is_lvds && has_reduced_clock && i915_powersave) {
3031 serge 5698
			I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
2327 Serge 5699
        intel_crtc->lowfreq_avail = true;
5700
    } else {
3031 serge 5701
			I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
2327 Serge 5702
    }
2342 Serge 5703
	}
2327 Serge 5704
 
3243 Serge 5705
	intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
5706
 
5707
	/* Note, this also computes intel_crtc->fdi_lanes which is used below in
5708
	 * ironlake_check_fdi_lanes. */
5709
	ironlake_set_m_n(crtc, mode, adjusted_mode);
5710
 
5711
	fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);
5712
 
5713
	if (is_cpu_edp)
5714
		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
5715
 
5716
	ironlake_set_pipeconf(crtc, adjusted_mode, dither);
5717
 
5718
	intel_wait_for_vblank(dev, pipe);
5719
 
5720
	/* Set up the display plane register */
5721
	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
5722
	POSTING_READ(DSPCNTR(plane));
5723
 
5724
	ret = intel_pipe_set_base(crtc, x, y, fb);
5725
 
5726
	intel_update_watermarks(dev);
5727
 
5728
	intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
5729
 
5730
	return fdi_config_ok ? ret : -EINVAL;
5731
}
5732
 
5733
static int haswell_crtc_mode_set(struct drm_crtc *crtc,
5734
				 struct drm_display_mode *mode,
5735
				 struct drm_display_mode *adjusted_mode,
5736
				 int x, int y,
5737
				 struct drm_framebuffer *fb)
5738
{
5739
	struct drm_device *dev = crtc->dev;
5740
	struct drm_i915_private *dev_priv = dev->dev_private;
5741
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5742
	int pipe = intel_crtc->pipe;
5743
	int plane = intel_crtc->plane;
5744
	int num_connectors = 0;
5745
	intel_clock_t clock, reduced_clock;
5746
	u32 dpll = 0, fp = 0, fp2 = 0;
5747
	bool ok, has_reduced_clock = false;
5748
	bool is_lvds = false, is_dp = false, is_cpu_edp = false;
5749
	struct intel_encoder *encoder;
5750
	u32 temp;
5751
	int ret;
5752
	bool dither;
5753
 
5754
	for_each_encoder_on_crtc(dev, crtc, encoder) {
5755
		switch (encoder->type) {
5756
		case INTEL_OUTPUT_LVDS:
5757
			is_lvds = true;
5758
			break;
5759
		case INTEL_OUTPUT_DISPLAYPORT:
5760
			is_dp = true;
5761
			break;
5762
		case INTEL_OUTPUT_EDP:
5763
			is_dp = true;
5764
			if (!intel_encoder_is_pch_edp(&encoder->base))
5765
				is_cpu_edp = true;
5766
			break;
5767
		}
5768
 
5769
		num_connectors++;
5770
	}
5771
 
5772
	if (is_cpu_edp)
5773
		intel_crtc->cpu_transcoder = TRANSCODER_EDP;
5774
	else
5775
		intel_crtc->cpu_transcoder = pipe;
5776
 
5777
	/* We are not sure yet this won't happen. */
5778
	WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
5779
	     INTEL_PCH_TYPE(dev));
5780
 
5781
	WARN(num_connectors != 1, "%d connectors attached to pipe %c\n",
5782
	     num_connectors, pipe_name(pipe));
5783
 
5784
	WARN_ON(I915_READ(PIPECONF(intel_crtc->cpu_transcoder)) &
5785
		(PIPECONF_ENABLE | I965_PIPECONF_ACTIVE));
5786
 
5787
	WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE);
5788
 
5789
	if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))
5790
		return -EINVAL;
5791
 
5792
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5793
		ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
5794
					     &has_reduced_clock,
5795
					     &reduced_clock);
5796
		if (!ok) {
5797
			DRM_ERROR("Couldn't find PLL settings for mode!\n");
5798
			return -EINVAL;
5799
		}
5800
	}
5801
 
5802
	/* Ensure that the cursor is valid for the new mode before changing... */
5803
//   intel_crtc_update_cursor(crtc, true);
5804
 
5805
	/* determine panel color depth */
5806
	dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
5807
					      adjusted_mode);
5808
	if (is_lvds && dev_priv->lvds_dither)
5809
		dither = true;
5810
 
5811
	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
5812
	drm_mode_debug_printmodeline(mode);
5813
 
5814
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5815
		fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
5816
		if (has_reduced_clock)
5817
			fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
5818
			      reduced_clock.m2;
5819
 
5820
		dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock,
5821
					     fp);
5822
 
5823
		/* CPU eDP is the only output that doesn't need a PCH PLL of its
5824
		 * own on pre-Haswell/LPT generation */
5825
		if (!is_cpu_edp) {
5826
			struct intel_pch_pll *pll;
5827
 
5828
			pll = intel_get_pch_pll(intel_crtc, dpll, fp);
5829
			if (pll == NULL) {
5830
				DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
5831
						 pipe);
5832
				return -EINVAL;
5833
			}
5834
		} else
5835
			intel_put_pch_pll(intel_crtc);
5836
 
5837
		/* The LVDS pin pair needs to be on before the DPLLs are
5838
		 * enabled.  This is an exception to the general rule that
5839
		 * mode_set doesn't turn things on.
5840
		 */
5841
		if (is_lvds) {
5842
			temp = I915_READ(PCH_LVDS);
5843
			temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
5844
			if (HAS_PCH_CPT(dev)) {
5845
				temp &= ~PORT_TRANS_SEL_MASK;
5846
				temp |= PORT_TRANS_SEL_CPT(pipe);
3031 serge 5847
	} else {
3243 Serge 5848
				if (pipe == 1)
5849
					temp |= LVDS_PIPEB_SELECT;
5850
				else
5851
					temp &= ~LVDS_PIPEB_SELECT;
5852
			}
5853
 
5854
			/* set the corresponding LVDS_BORDER bit */
5855
			temp |= dev_priv->lvds_border_bits;
5856
			/* Set the B0-B3 data pairs corresponding to whether
5857
			 * we're going to set the DPLLs for dual-channel mode or
5858
			 * not.
5859
			 */
5860
			if (clock.p2 == 7)
5861
				temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
5862
			else
5863
				temp &= ~(LVDS_B0B3_POWER_UP |
5864
					  LVDS_CLKB_POWER_UP);
5865
 
5866
			/* It would be nice to set 24 vs 18-bit mode
5867
			 * (LVDS_A3_POWER_UP) appropriately here, but we need to
5868
			 * look more thoroughly into how panels behave in the
5869
			 * two modes.
5870
			 */
5871
			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5872
			if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5873
				temp |= LVDS_HSYNC_POLARITY;
5874
			if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
5875
				temp |= LVDS_VSYNC_POLARITY;
5876
			I915_WRITE(PCH_LVDS, temp);
5877
		}
3031 serge 5878
	}
2327 Serge 5879
 
3243 Serge 5880
	if (is_dp && !is_cpu_edp) {
5881
		intel_dp_set_m_n(crtc, mode, adjusted_mode);
5882
	} else {
5883
		if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5884
			/* For non-DP output, clear any trans DP clock recovery
5885
			 * setting.*/
5886
			I915_WRITE(TRANSDATA_M1(pipe), 0);
5887
			I915_WRITE(TRANSDATA_N1(pipe), 0);
5888
			I915_WRITE(TRANSDPLINK_M1(pipe), 0);
5889
			I915_WRITE(TRANSDPLINK_N1(pipe), 0);
5890
		}
5891
	}
2327 Serge 5892
 
3243 Serge 5893
	intel_crtc->lowfreq_avail = false;
5894
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5895
		if (intel_crtc->pch_pll) {
5896
			I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
2327 Serge 5897
 
3243 Serge 5898
			/* Wait for the clocks to stabilize. */
5899
			POSTING_READ(intel_crtc->pch_pll->pll_reg);
5900
			udelay(150);
5901
 
5902
			/* The pixel multiplier can only be updated once the
5903
			 * DPLL is enabled and the clocks are stable.
5904
			 *
5905
			 * So write it again.
2327 Serge 5906
     */
3243 Serge 5907
			I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
5908
		}
2327 Serge 5909
 
3243 Serge 5910
		if (intel_crtc->pch_pll) {
5911
			if (is_lvds && has_reduced_clock && i915_powersave) {
5912
				I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
5913
				intel_crtc->lowfreq_avail = true;
5914
			} else {
5915
				I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
5916
			}
5917
		}
5918
	}
2327 Serge 5919
 
3243 Serge 5920
	intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
5921
 
5922
	if (!is_dp || is_cpu_edp)
5923
		ironlake_set_m_n(crtc, mode, adjusted_mode);
5924
 
5925
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
3031 serge 5926
		if (is_cpu_edp)
2327 Serge 5927
			ironlake_set_pll_edp(crtc, adjusted_mode->clock);
5928
 
3243 Serge 5929
	haswell_set_pipeconf(crtc, adjusted_mode, dither);
2327 Serge 5930
 
3031 serge 5931
	/* Set up the display plane register */
5932
	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
2327 Serge 5933
    POSTING_READ(DSPCNTR(plane));
5934
 
3031 serge 5935
	ret = intel_pipe_set_base(crtc, x, y, fb);
2327 Serge 5936
 
5937
    intel_update_watermarks(dev);
5938
 
3031 serge 5939
	intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
2336 Serge 5940
 
2327 Serge 5941
    return ret;
5942
}
5943
 
2330 Serge 5944
static int intel_crtc_mode_set(struct drm_crtc *crtc,
5945
			       struct drm_display_mode *mode,
5946
			       struct drm_display_mode *adjusted_mode,
5947
			       int x, int y,
3031 serge 5948
			       struct drm_framebuffer *fb)
2330 Serge 5949
{
5950
	struct drm_device *dev = crtc->dev;
5951
	struct drm_i915_private *dev_priv = dev->dev_private;
3243 Serge 5952
	struct drm_encoder_helper_funcs *encoder_funcs;
5953
	struct intel_encoder *encoder;
2330 Serge 5954
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5955
	int pipe = intel_crtc->pipe;
5956
	int ret;
2327 Serge 5957
 
3031 serge 5958
	drm_vblank_pre_modeset(dev, pipe);
2327 Serge 5959
 
2330 Serge 5960
	ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
3031 serge 5961
					      x, y, fb);
5962
	drm_vblank_post_modeset(dev, pipe);
2327 Serge 5963
 
3243 Serge 5964
	if (ret != 0)
2330 Serge 5965
		return ret;
3243 Serge 5966
 
5967
	for_each_encoder_on_crtc(dev, crtc, encoder) {
5968
		DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
5969
			encoder->base.base.id,
5970
			drm_get_encoder_name(&encoder->base),
5971
			mode->base.id, mode->name);
5972
		encoder_funcs = encoder->base.helper_private;
5973
		encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
5974
	}
5975
 
5976
	return 0;
2330 Serge 5977
}
2327 Serge 5978
 
2342 Serge 5979
static bool intel_eld_uptodate(struct drm_connector *connector,
5980
			       int reg_eldv, uint32_t bits_eldv,
5981
			       int reg_elda, uint32_t bits_elda,
5982
			       int reg_edid)
5983
{
5984
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
5985
	uint8_t *eld = connector->eld;
5986
	uint32_t i;
5987
 
5988
	i = I915_READ(reg_eldv);
5989
	i &= bits_eldv;
5990
 
5991
	if (!eld[0])
5992
		return !i;
5993
 
5994
	if (!i)
5995
		return false;
5996
 
5997
	i = I915_READ(reg_elda);
5998
	i &= ~bits_elda;
5999
	I915_WRITE(reg_elda, i);
6000
 
6001
	for (i = 0; i < eld[2]; i++)
6002
		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
6003
			return false;
6004
 
6005
	return true;
6006
}
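/* Note added for clarity (general ELD background, not text from this file):
 * the ELD (EDID-Like Data) block carries the sink's audio capabilities to the
 * audio driver; eld[2] holds the baseline block length in 32-bit dwords, which
 * is why the routines below copy up to eld[2] dwords into the hardware buffer. */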
6007
 
6008
static void g4x_write_eld(struct drm_connector *connector,
6009
			  struct drm_crtc *crtc)
6010
{
6011
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
6012
	uint8_t *eld = connector->eld;
6013
	uint32_t eldv;
6014
	uint32_t len;
6015
	uint32_t i;
6016
 
6017
	i = I915_READ(G4X_AUD_VID_DID);
6018
 
6019
	if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
6020
		eldv = G4X_ELDV_DEVCL_DEVBLC;
6021
	else
6022
		eldv = G4X_ELDV_DEVCTG;
6023
 
6024
	if (intel_eld_uptodate(connector,
6025
			       G4X_AUD_CNTL_ST, eldv,
6026
			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
6027
			       G4X_HDMIW_HDMIEDID))
6028
		return;
6029
 
6030
	i = I915_READ(G4X_AUD_CNTL_ST);
6031
	i &= ~(eldv | G4X_ELD_ADDR);
6032
	len = (i >> 9) & 0x1f;		/* ELD buffer size */
6033
	I915_WRITE(G4X_AUD_CNTL_ST, i);
6034
 
6035
	if (!eld[0])
6036
		return;
6037
 
6038
	len = min_t(uint8_t, eld[2], len);
6039
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
6040
	for (i = 0; i < len; i++)
6041
		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
6042
 
6043
	i = I915_READ(G4X_AUD_CNTL_ST);
6044
	i |= eldv;
6045
	I915_WRITE(G4X_AUD_CNTL_ST, i);
6046
}
6047
 
3031 serge 6048
static void haswell_write_eld(struct drm_connector *connector,
6049
				     struct drm_crtc *crtc)
6050
{
6051
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
6052
	uint8_t *eld = connector->eld;
6053
	struct drm_device *dev = crtc->dev;
6054
	uint32_t eldv;
6055
	uint32_t i;
6056
	int len;
6057
	int pipe = to_intel_crtc(crtc)->pipe;
6058
	int tmp;
6059
 
6060
	int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
6061
	int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
6062
	int aud_config = HSW_AUD_CFG(pipe);
6063
	int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
6064
 
6065
 
6066
	DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n");
6067
 
6068
	/* Audio output enable */
6069
	DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
6070
	tmp = I915_READ(aud_cntrl_st2);
6071
	tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
6072
	I915_WRITE(aud_cntrl_st2, tmp);
6073
 
6074
	/* Wait for 1 vertical blank */
6075
	intel_wait_for_vblank(dev, pipe);
6076
 
6077
	/* Set ELD valid state */
6078
	tmp = I915_READ(aud_cntrl_st2);
6079
	DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp);
6080
	tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
6081
	I915_WRITE(aud_cntrl_st2, tmp);
6082
	tmp = I915_READ(aud_cntrl_st2);
6083
	DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp);
6084
 
6085
	/* Enable HDMI mode */
6086
	tmp = I915_READ(aud_config);
6087
	DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp);
6088
	/* clear N_programming_enable and N_value_index */
6089
	tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
6090
	I915_WRITE(aud_config, tmp);
6091
 
6092
	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
6093
 
6094
	eldv = AUDIO_ELD_VALID_A << (pipe * 4);
6095
 
6096
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
6097
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
6098
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
6099
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
6100
	} else
6101
		I915_WRITE(aud_config, 0);
6102
 
6103
	if (intel_eld_uptodate(connector,
6104
			       aud_cntrl_st2, eldv,
6105
			       aud_cntl_st, IBX_ELD_ADDRESS,
6106
			       hdmiw_hdmiedid))
6107
		return;
6108
 
6109
	i = I915_READ(aud_cntrl_st2);
6110
	i &= ~eldv;
6111
	I915_WRITE(aud_cntrl_st2, i);
6112
 
6113
	if (!eld[0])
6114
		return;
6115
 
6116
	i = I915_READ(aud_cntl_st);
6117
	i &= ~IBX_ELD_ADDRESS;
6118
	I915_WRITE(aud_cntl_st, i);
6119
	i = (i >> 29) & DIP_PORT_SEL_MASK;		/* DIP_Port_Select, 0x1 = PortB */
6120
	DRM_DEBUG_DRIVER("port num:%d\n", i);
6121
 
6122
	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
6123
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
6124
	for (i = 0; i < len; i++)
6125
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
6126
 
6127
	i = I915_READ(aud_cntrl_st2);
6128
	i |= eldv;
6129
	I915_WRITE(aud_cntrl_st2, i);
6130
 
6131
}
6132
 
2342 Serge 6133
static void ironlake_write_eld(struct drm_connector *connector,
6134
				     struct drm_crtc *crtc)
6135
{
6136
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
6137
	uint8_t *eld = connector->eld;
6138
	uint32_t eldv;
6139
	uint32_t i;
6140
	int len;
6141
	int hdmiw_hdmiedid;
3031 serge 6142
	int aud_config;
2342 Serge 6143
	int aud_cntl_st;
6144
	int aud_cntrl_st2;
3031 serge 6145
	int pipe = to_intel_crtc(crtc)->pipe;
2342 Serge 6146
 
6147
	if (HAS_PCH_IBX(connector->dev)) {
3031 serge 6148
		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
6149
		aud_config = IBX_AUD_CFG(pipe);
6150
		aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
2342 Serge 6151
		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
6152
	} else {
3031 serge 6153
		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
6154
		aud_config = CPT_AUD_CFG(pipe);
6155
		aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
2342 Serge 6156
		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
6157
	}
6158
 
3031 serge 6159
	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
2342 Serge 6160
 
6161
	i = I915_READ(aud_cntl_st);
3031 serge 6162
	i = (i >> 29) & DIP_PORT_SEL_MASK;		/* DIP_Port_Select, 0x1 = PortB */
2342 Serge 6163
	if (!i) {
6164
		DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
6165
		/* operate blindly on all ports */
6166
		eldv = IBX_ELD_VALIDB;
6167
		eldv |= IBX_ELD_VALIDB << 4;
6168
		eldv |= IBX_ELD_VALIDB << 8;
6169
	} else {
6170
		DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
6171
		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
6172
	}
6173
 
6174
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
6175
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
6176
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
3031 serge 6177
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
6178
	} else
6179
		I915_WRITE(aud_config, 0);
2342 Serge 6180
 
6181
	if (intel_eld_uptodate(connector,
6182
			       aud_cntrl_st2, eldv,
6183
			       aud_cntl_st, IBX_ELD_ADDRESS,
6184
			       hdmiw_hdmiedid))
6185
		return;
6186
 
6187
	i = I915_READ(aud_cntrl_st2);
6188
	i &= ~eldv;
6189
	I915_WRITE(aud_cntrl_st2, i);
6190
 
6191
	if (!eld[0])
6192
		return;
6193
 
6194
	i = I915_READ(aud_cntl_st);
6195
	i &= ~IBX_ELD_ADDRESS;
6196
	I915_WRITE(aud_cntl_st, i);
6197
 
6198
	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
6199
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
6200
	for (i = 0; i < len; i++)
6201
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
6202
 
6203
	i = I915_READ(aud_cntrl_st2);
6204
	i |= eldv;
6205
	I915_WRITE(aud_cntrl_st2, i);
6206
}
6207
 
6208
void intel_write_eld(struct drm_encoder *encoder,
6209
		     struct drm_display_mode *mode)
6210
{
6211
	struct drm_crtc *crtc = encoder->crtc;
6212
	struct drm_connector *connector;
6213
	struct drm_device *dev = encoder->dev;
6214
	struct drm_i915_private *dev_priv = dev->dev_private;
6215
 
6216
	connector = drm_select_eld(encoder, mode);
6217
	if (!connector)
6218
		return;
6219
 
6220
	DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6221
			 connector->base.id,
6222
			 drm_get_connector_name(connector),
6223
			 connector->encoder->base.id,
6224
			 drm_get_encoder_name(connector->encoder));
6225
 
6226
	connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
6227
 
6228
	if (dev_priv->display.write_eld)
6229
		dev_priv->display.write_eld(connector, crtc);
6230
}
6231
 
2327 Serge 6232
/** Loads the palette/gamma unit for the CRTC with the prepared values */
6233
void intel_crtc_load_lut(struct drm_crtc *crtc)
6234
{
6235
	struct drm_device *dev = crtc->dev;
6236
	struct drm_i915_private *dev_priv = dev->dev_private;
6237
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6238
	int palreg = PALETTE(intel_crtc->pipe);
6239
	int i;
6240
 
6241
	/* The clocks have to be on to load the palette. */
3031 serge 6242
	if (!crtc->enabled || !intel_crtc->active)
2327 Serge 6243
		return;
6244
 
6245
	/* use legacy palette for Ironlake */
6246
	if (HAS_PCH_SPLIT(dev))
6247
		palreg = LGC_PALETTE(intel_crtc->pipe);
6248
 
6249
	for (i = 0; i < 256; i++) {
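		/* Each entry packs the 8-bit components as 0x00RRGGBB. */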
6250
		I915_WRITE(palreg + 4 * i,
6251
			   (intel_crtc->lut_r[i] << 16) |
6252
			   (intel_crtc->lut_g[i] << 8) |
6253
			   intel_crtc->lut_b[i]);
6254
	}
6255
}
6256
 
3031 serge 6257
#if 0
6258
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
6259
{
6260
	struct drm_device *dev = crtc->dev;
6261
	struct drm_i915_private *dev_priv = dev->dev_private;
6262
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6263
	bool visible = base != 0;
6264
	u32 cntl;
2327 Serge 6265
 
3031 serge 6266
	if (intel_crtc->cursor_visible == visible)
6267
		return;
2327 Serge 6268
 
3031 serge 6269
	cntl = I915_READ(_CURACNTR);
6270
	if (visible) {
6271
		/* On these chipsets we can only modify the base whilst
6272
		 * the cursor is disabled.
6273
		 */
6274
		I915_WRITE(_CURABASE, base);
2327 Serge 6275
 
3031 serge 6276
		cntl &= ~(CURSOR_FORMAT_MASK);
6277
		/* XXX width must be 64, stride 256 => 0x00 << 28 */
6278
		cntl |= CURSOR_ENABLE |
6279
			CURSOR_GAMMA_ENABLE |
6280
			CURSOR_FORMAT_ARGB;
6281
	} else
6282
		cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
6283
	I915_WRITE(_CURACNTR, cntl);
2327 Serge 6284
 
3031 serge 6285
	intel_crtc->cursor_visible = visible;
6286
}
2327 Serge 6287
 
3031 serge 6288
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
6289
{
6290
	struct drm_device *dev = crtc->dev;
6291
	struct drm_i915_private *dev_priv = dev->dev_private;
6292
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6293
	int pipe = intel_crtc->pipe;
6294
	bool visible = base != 0;
2327 Serge 6295
 
3031 serge 6296
	if (intel_crtc->cursor_visible != visible) {
6297
		uint32_t cntl = I915_READ(CURCNTR(pipe));
6298
		if (base) {
6299
			cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
6300
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
6301
			cntl |= pipe << 28; /* Connect to correct pipe */
6302
		} else {
6303
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
6304
			cntl |= CURSOR_MODE_DISABLE;
6305
		}
6306
		I915_WRITE(CURCNTR(pipe), cntl);
2327 Serge 6307
 
3031 serge 6308
		intel_crtc->cursor_visible = visible;
6309
	}
6310
	/* and commit changes on next vblank */
6311
	I915_WRITE(CURBASE(pipe), base);
6312
}
2327 Serge 6313
 
3031 serge 6314
static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
6315
{
6316
	struct drm_device *dev = crtc->dev;
6317
	struct drm_i915_private *dev_priv = dev->dev_private;
6318
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6319
	int pipe = intel_crtc->pipe;
6320
	bool visible = base != 0;
2327 Serge 6321
 
3031 serge 6322
	if (intel_crtc->cursor_visible != visible) {
6323
		uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
6324
		if (base) {
6325
			cntl &= ~CURSOR_MODE;
6326
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
6327
		} else {
6328
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
6329
			cntl |= CURSOR_MODE_DISABLE;
6330
		}
6331
		I915_WRITE(CURCNTR_IVB(pipe), cntl);
2327 Serge 6332
 
3031 serge 6333
		intel_crtc->cursor_visible = visible;
6334
	}
6335
	/* and commit changes on next vblank */
6336
	I915_WRITE(CURBASE_IVB(pipe), base);
6337
}
2327 Serge 6338
 
3031 serge 6339
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
6340
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
6341
				     bool on)
6342
{
6343
	struct drm_device *dev = crtc->dev;
6344
	struct drm_i915_private *dev_priv = dev->dev_private;
6345
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6346
	int pipe = intel_crtc->pipe;
6347
	int x = intel_crtc->cursor_x;
6348
	int y = intel_crtc->cursor_y;
6349
	u32 base, pos;
6350
	bool visible;
2327 Serge 6351
 
3031 serge 6352
	pos = 0;
2327 Serge 6353
 
3031 serge 6354
	if (on && crtc->enabled && crtc->fb) {
6355
		base = intel_crtc->cursor_addr;
6356
		if (x > (int) crtc->fb->width)
6357
			base = 0;
2327 Serge 6358
 
3031 serge 6359
		if (y > (int) crtc->fb->height)
6360
			base = 0;
6361
	} else
6362
		base = 0;
2327 Serge 6363
 
3031 serge 6364
	if (x < 0) {
6365
		if (x + intel_crtc->cursor_width < 0)
6366
			base = 0;
2327 Serge 6367
 
3031 serge 6368
		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
6369
		x = -x;
6370
	}
6371
	pos |= x << CURSOR_X_SHIFT;
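	/* Note added for clarity: negative coordinates are written in
	 * sign/magnitude form, e.g. x = -10 becomes the sign bit plus a
	 * magnitude of 10 in the X field. */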
2327 Serge 6372
 
3031 serge 6373
	if (y < 0) {
6374
		if (y + intel_crtc->cursor_height < 0)
6375
			base = 0;
2327 Serge 6376
 
3031 serge 6377
		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
6378
		y = -y;
6379
	}
6380
	pos |= y << CURSOR_Y_SHIFT;
2327 Serge 6381
 
3031 serge 6382
	visible = base != 0;
6383
	if (!visible && !intel_crtc->cursor_visible)
6384
		return;
2327 Serge 6385
 
3031 serge 6386
	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
6387
		I915_WRITE(CURPOS_IVB(pipe), pos);
6388
		ivb_update_cursor(crtc, base);
6389
	} else {
6390
		I915_WRITE(CURPOS(pipe), pos);
6391
		if (IS_845G(dev) || IS_I865G(dev))
6392
			i845_update_cursor(crtc, base);
6393
		else
6394
			i9xx_update_cursor(crtc, base);
6395
	}
6396
}
2327 Serge 6397
 
3031 serge 6398
static int intel_crtc_cursor_set(struct drm_crtc *crtc,
6399
				 struct drm_file *file,
6400
				 uint32_t handle,
6401
				 uint32_t width, uint32_t height)
6402
{
6403
	struct drm_device *dev = crtc->dev;
6404
	struct drm_i915_private *dev_priv = dev->dev_private;
6405
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6406
	struct drm_i915_gem_object *obj;
6407
	uint32_t addr;
6408
	int ret;
2327 Serge 6409
 
3031 serge 6410
	/* if we want to turn off the cursor ignore width and height */
6411
	if (!handle) {
6412
		DRM_DEBUG_KMS("cursor off\n");
6413
		addr = 0;
6414
		obj = NULL;
6415
		mutex_lock(&dev->struct_mutex);
6416
		goto finish;
6417
	}
2327 Serge 6418
 
3031 serge 6419
	/* Currently we only support 64x64 cursors */
6420
	if (width != 64 || height != 64) {
6421
		DRM_ERROR("we currently only support 64x64 cursors\n");
6422
		return -EINVAL;
6423
	}
2327 Serge 6424
 
3031 serge 6425
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
6426
	if (&obj->base == NULL)
6427
		return -ENOENT;
2327 Serge 6428
 
3031 serge 6429
	if (obj->base.size < width * height * 4) {
6430
		DRM_ERROR("buffer is too small\n");
6431
		ret = -ENOMEM;
6432
		goto fail;
6433
	}
2327 Serge 6434
 
3031 serge 6435
	/* we only need to pin inside GTT if cursor is non-phy */
6436
	mutex_lock(&dev->struct_mutex);
6437
	if (!dev_priv->info->cursor_needs_physical) {
6438
		if (obj->tiling_mode) {
6439
			DRM_ERROR("cursor cannot be tiled\n");
6440
			ret = -EINVAL;
6441
			goto fail_locked;
6442
		}
2327 Serge 6443
 
3031 serge 6444
		ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
6445
		if (ret) {
6446
			DRM_ERROR("failed to move cursor bo into the GTT\n");
6447
			goto fail_locked;
6448
		}
2327 Serge 6449
 
3031 serge 6450
		ret = i915_gem_object_put_fence(obj);
6451
		if (ret) {
6452
			DRM_ERROR("failed to release fence for cursor");
6453
			goto fail_unpin;
6454
		}
2327 Serge 6455
 
3031 serge 6456
		addr = obj->gtt_offset;
6457
	} else {
6458
		int align = IS_I830(dev) ? 16 * 1024 : 256;
6459
		ret = i915_gem_attach_phys_object(dev, obj,
6460
						  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
6461
						  align);
6462
		if (ret) {
6463
			DRM_ERROR("failed to attach phys object\n");
6464
			goto fail_locked;
6465
		}
6466
		addr = obj->phys_obj->handle->busaddr;
6467
	}
2327 Serge 6468
 
3031 serge 6469
	if (IS_GEN2(dev))
6470
		I915_WRITE(CURSIZE, (height << 12) | width);
2327 Serge 6471
 
3031 serge 6472
 finish:
6473
	if (intel_crtc->cursor_bo) {
6474
		if (dev_priv->info->cursor_needs_physical) {
6475
			if (intel_crtc->cursor_bo != obj)
6476
				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
6477
		} else
6478
			i915_gem_object_unpin(intel_crtc->cursor_bo);
6479
		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
6480
	}
2327 Serge 6481
 
3031 serge 6482
	mutex_unlock(&dev->struct_mutex);
2327 Serge 6483
 
3031 serge 6484
	intel_crtc->cursor_addr = addr;
6485
	intel_crtc->cursor_bo = obj;
6486
	intel_crtc->cursor_width = width;
6487
	intel_crtc->cursor_height = height;
2327 Serge 6488
 
3031 serge 6489
//   intel_crtc_update_cursor(crtc, true);
2327 Serge 6490
 
3031 serge 6491
	return 0;
6492
fail_unpin:
6493
	i915_gem_object_unpin(obj);
6494
fail_locked:
6495
	mutex_unlock(&dev->struct_mutex);
6496
fail:
6497
	drm_gem_object_unreference_unlocked(&obj->base);
6498
	return ret;
6499
}
2327 Serge 6500
 
3031 serge 6501
static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
6502
{
6503
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6504
 
6505
	intel_crtc->cursor_x = x;
6506
	intel_crtc->cursor_y = y;
6507
 
6508
//   intel_crtc_update_cursor(crtc, true);
6509
 
6510
	return 0;
6511
}
6512
#endif
6513
 
2332 Serge 6514
/** Sets the color ramps on behalf of RandR */
6515
void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
6516
				 u16 blue, int regno)
6517
{
6518
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2327 Serge 6519
 
2332 Serge 6520
	intel_crtc->lut_r[regno] = red >> 8;
6521
	intel_crtc->lut_g[regno] = green >> 8;
6522
	intel_crtc->lut_b[regno] = blue >> 8;
6523
}
2327 Serge 6524
 
2332 Serge 6525
void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
6526
			     u16 *blue, int regno)
6527
{
6528
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2327 Serge 6529
 
2332 Serge 6530
	*red = intel_crtc->lut_r[regno] << 8;
6531
	*green = intel_crtc->lut_g[regno] << 8;
6532
	*blue = intel_crtc->lut_b[regno] << 8;
6533
}
2327 Serge 6534
 
2330 Serge 6535
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
6536
				 u16 *blue, uint32_t start, uint32_t size)
6537
{
6538
	int end = (start + size > 256) ? 256 : start + size, i;
6539
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2327 Serge 6540
 
2330 Serge 6541
	for (i = start; i < end; i++) {
6542
		intel_crtc->lut_r[i] = red[i] >> 8;
6543
		intel_crtc->lut_g[i] = green[i] >> 8;
6544
		intel_crtc->lut_b[i] = blue[i] >> 8;
6545
	}
2327 Serge 6546
 
2330 Serge 6547
	intel_crtc_load_lut(crtc);
6548
}
2327 Serge 6549
 
2330 Serge 6550
/**
6551
 * Get a pipe with a simple mode set on it for doing load-based monitor
6552
 * detection.
6553
 *
6554
 * It will be up to the load-detect code to adjust the pipe as appropriate for
6555
 * its requirements.  The pipe will be connected to no other encoders.
6556
 *
6557
 * Currently this code will only succeed if there is a pipe with no encoders
6558
 * configured for it.  In the future, it could choose to temporarily disable
6559
 * some outputs to free up a pipe for its use.
6560
 *
6561
 * \return crtc, or NULL if no pipes are available.
6562
 */
2327 Serge 6563
 
2330 Serge 6564
/* VESA 640x480x72Hz mode to set on the pipe */
6565
static struct drm_display_mode load_detect_mode = {
6566
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
6567
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
6568
};
2327 Serge 6569
 
3031 serge 6570
static struct drm_framebuffer *
6571
intel_framebuffer_create(struct drm_device *dev,
6572
			 struct drm_mode_fb_cmd2 *mode_cmd,
6573
			 struct drm_i915_gem_object *obj)
6574
{
6575
	struct intel_framebuffer *intel_fb;
6576
	int ret;
2327 Serge 6577
 
3031 serge 6578
	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
6579
	if (!intel_fb) {
6580
		drm_gem_object_unreference_unlocked(&obj->base);
6581
		return ERR_PTR(-ENOMEM);
6582
	}
2327 Serge 6583
 
3031 serge 6584
	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
6585
	if (ret) {
6586
		drm_gem_object_unreference_unlocked(&obj->base);
6587
		kfree(intel_fb);
6588
		return ERR_PTR(ret);
6589
	}
2327 Serge 6590
 
3031 serge 6591
	return &intel_fb->base;
6592
}
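
/*
 * Usage note (added for clarity): on failure this helper returns an
 * ERR_PTR() value rather than NULL and has already dropped the caller's
 * reference on the GEM object, so callers only need an IS_ERR() check --
 * the load-detect code below tests its framebuffer this way before use.
 */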
2327 Serge 6593
 
2330 Serge 6594
static u32
6595
intel_framebuffer_pitch_for_width(int width, int bpp)
6596
{
6597
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
6598
	return ALIGN(pitch, 64);
6599
}
2327 Serge 6600
 
2330 Serge 6601
static u32
6602
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
6603
{
6604
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
6605
	return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
6606
}
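
/*
 * Worked example (added for clarity, assuming PAGE_SIZE == 4096): for a
 * 1024x768 mode at 32 bpp, intel_framebuffer_pitch_for_width() yields
 * DIV_ROUND_UP(1024 * 32, 8) = 4096 bytes, which is already 64-byte
 * aligned, and intel_framebuffer_size_for_mode() rounds 4096 * 768 =
 * 3145728 bytes up to the page size, which it already is.
 */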
2327 Serge 6607
 
2330 Serge 6608
static struct drm_framebuffer *
6609
intel_framebuffer_create_for_mode(struct drm_device *dev,
6610
				  struct drm_display_mode *mode,
6611
				  int depth, int bpp)
6612
{
6613
	struct drm_i915_gem_object *obj;
3243 Serge 6614
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2327 Serge 6615
 
2330 Serge 6616
//	obj = i915_gem_alloc_object(dev,
6617
//				    intel_framebuffer_size_for_mode(mode, bpp));
6618
//	if (obj == NULL)
6619
		return ERR_PTR(-ENOMEM);
2327 Serge 6620
 
2330 Serge 6621
//	mode_cmd.width = mode->hdisplay;
6622
//	mode_cmd.height = mode->vdisplay;
6623
//	mode_cmd.depth = depth;
6624
//	mode_cmd.bpp = bpp;
6625
//	mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp);
2327 Serge 6626
 
2330 Serge 6627
//	return intel_framebuffer_create(dev, &mode_cmd, obj);
6628
}
2327 Serge 6629
 
2330 Serge 6630
static struct drm_framebuffer *
6631
mode_fits_in_fbdev(struct drm_device *dev,
6632
		   struct drm_display_mode *mode)
6633
{
6634
	struct drm_i915_private *dev_priv = dev->dev_private;
6635
	struct drm_i915_gem_object *obj;
6636
	struct drm_framebuffer *fb;
2327 Serge 6637
 
2330 Serge 6638
//	if (dev_priv->fbdev == NULL)
6639
//		return NULL;
2327 Serge 6640
 
2330 Serge 6641
//	obj = dev_priv->fbdev->ifb.obj;
6642
//	if (obj == NULL)
6643
		return NULL;
2327 Serge 6644
 
2330 Serge 6645
//	if (obj->base.size < mode->vdisplay * fb->pitch)
3031 serge 6646
//	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
6647
//							       fb->bits_per_pixel))
2330 Serge 6648
//		return NULL;
2327 Serge 6649
 
3031 serge 6650
//	if (obj->base.size < mode->vdisplay * fb->pitches[0])
6651
//		return NULL;
6652
 
2330 Serge 6653
//	return fb;
6654
}
2327 Serge 6655
 
3031 serge 6656
bool intel_get_load_detect_pipe(struct drm_connector *connector,
2330 Serge 6657
				struct drm_display_mode *mode,
6658
				struct intel_load_detect_pipe *old)
6659
{
6660
	struct intel_crtc *intel_crtc;
3031 serge 6661
	struct intel_encoder *intel_encoder =
6662
		intel_attached_encoder(connector);
2330 Serge 6663
	struct drm_crtc *possible_crtc;
6664
	struct drm_encoder *encoder = &intel_encoder->base;
6665
	struct drm_crtc *crtc = NULL;
6666
	struct drm_device *dev = encoder->dev;
3031 serge 6667
	struct drm_framebuffer *fb;
2330 Serge 6668
	int i = -1;
2327 Serge 6669
 
2330 Serge 6670
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6671
		      connector->base.id, drm_get_connector_name(connector),
6672
		      encoder->base.id, drm_get_encoder_name(encoder));
2327 Serge 6673
 
2330 Serge 6674
	/*
6675
	 * Algorithm gets a little messy:
6676
	 *
6677
	 *   - if the connector already has an assigned crtc, use it (but make
6678
	 *     sure it's on first)
6679
	 *
6680
	 *   - try to find the first unused crtc that can drive this connector,
6681
	 *     and use that if we find one
6682
	 */
2327 Serge 6683
 
2330 Serge 6684
	/* See if we already have a CRTC for this connector */
6685
	if (encoder->crtc) {
6686
		crtc = encoder->crtc;
2327 Serge 6687
 
3031 serge 6688
		old->dpms_mode = connector->dpms;
2330 Serge 6689
		old->load_detect_temp = false;
2327 Serge 6690
 
2330 Serge 6691
		/* Make sure the crtc and connector are running */
3031 serge 6692
		if (connector->dpms != DRM_MODE_DPMS_ON)
6693
			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
2327 Serge 6694
 
2330 Serge 6695
		return true;
6696
	}
2327 Serge 6697
 
2330 Serge 6698
	/* Find an unused one (if possible) */
6699
	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
6700
		i++;
6701
		if (!(encoder->possible_crtcs & (1 << i)))
6702
			continue;
6703
		if (!possible_crtc->enabled) {
6704
			crtc = possible_crtc;
6705
			break;
6706
		}
6707
	}
2327 Serge 6708
 
2330 Serge 6709
	/*
6710
	 * If we didn't find an unused CRTC, don't use any.
6711
	 */
6712
	if (!crtc) {
6713
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
6714
		return false;
6715
	}
2327 Serge 6716
 
3031 serge 6717
	intel_encoder->new_crtc = to_intel_crtc(crtc);
6718
	to_intel_connector(connector)->new_encoder = intel_encoder;
2327 Serge 6719
 
2330 Serge 6720
	intel_crtc = to_intel_crtc(crtc);
3031 serge 6721
	old->dpms_mode = connector->dpms;
2330 Serge 6722
	old->load_detect_temp = true;
6723
	old->release_fb = NULL;
2327 Serge 6724
 
2330 Serge 6725
	if (!mode)
6726
		mode = &load_detect_mode;
2327 Serge 6727
 
2330 Serge 6728
	/* We need a framebuffer large enough to accommodate all accesses
6729
	 * that the plane may generate whilst we perform load detection.
6730
	 * We can not rely on the fbcon either being present (we get called
6731
	 * during its initialisation to detect all boot displays, or it may
6732
	 * not even exist) or that it is large enough to satisfy the
6733
	 * requested mode.
6734
	 */
3031 serge 6735
	fb = mode_fits_in_fbdev(dev, mode);
6736
	if (fb == NULL) {
2330 Serge 6737
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
3031 serge 6738
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
6739
		old->release_fb = fb;
2330 Serge 6740
	} else
6741
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
3031 serge 6742
	if (IS_ERR(fb)) {
2330 Serge 6743
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
3243 Serge 6744
		return false;
2330 Serge 6745
	}
2327 Serge 6746
 
3031 serge 6747
	if (!intel_set_mode(crtc, mode, 0, 0, fb)) {
2330 Serge 6748
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
6749
		if (old->release_fb)
6750
			old->release_fb->funcs->destroy(old->release_fb);
3243 Serge 6751
		return false;
2330 Serge 6752
	}
2327 Serge 6753
 
2330 Serge 6754
	/* let the connector get through one full cycle before testing */
6755
	intel_wait_for_vblank(dev, intel_crtc->pipe);
6756
	return true;
6757
}
2327 Serge 6758
 
3031 serge 6759
void intel_release_load_detect_pipe(struct drm_connector *connector,
2330 Serge 6760
				    struct intel_load_detect_pipe *old)
6761
{
3031 serge 6762
	struct intel_encoder *intel_encoder =
6763
		intel_attached_encoder(connector);
2330 Serge 6764
	struct drm_encoder *encoder = &intel_encoder->base;
2327 Serge 6765
 
2330 Serge 6766
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6767
		      connector->base.id, drm_get_connector_name(connector),
6768
		      encoder->base.id, drm_get_encoder_name(encoder));
2327 Serge 6769
 
2330 Serge 6770
	if (old->load_detect_temp) {
3031 serge 6771
		struct drm_crtc *crtc = encoder->crtc;
2327 Serge 6772
 
3031 serge 6773
		to_intel_connector(connector)->new_encoder = NULL;
6774
		intel_encoder->new_crtc = NULL;
6775
		intel_set_mode(crtc, NULL, 0, 0, NULL);
6776
 
2330 Serge 6777
		if (old->release_fb)
6778
			old->release_fb->funcs->destroy(old->release_fb);
2327 Serge 6779
 
2330 Serge 6780
		return;
6781
	}
2327 Serge 6782
 
2330 Serge 6783
	/* Switch crtc and encoder back off if necessary */
3031 serge 6784
	if (old->dpms_mode != DRM_MODE_DPMS_ON)
6785
		connector->funcs->dpms(connector, old->dpms_mode);
2330 Serge 6786
}
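
/*
 * Typical load-detect usage, sketched here for reference only (the real
 * call sites live in the connector code, e.g. CRT/TV detection, not in
 * this file; probe_the_output() below is a hypothetical placeholder):
 *
 *	struct intel_load_detect_pipe tmp;
 *
 *	if (intel_get_load_detect_pipe(connector, NULL, &tmp)) {
 *		status = probe_the_output();  // connector-specific check
 *		intel_release_load_detect_pipe(connector, &tmp);
 *	}
 *
 * Passing a NULL mode makes intel_get_load_detect_pipe() fall back to the
 * VESA 640x480 load_detect_mode defined above.
 */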
2327 Serge 6787
 
2330 Serge 6788
/* Returns the clock of the currently programmed mode of the given pipe. */
6789
static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
6790
{
6791
	struct drm_i915_private *dev_priv = dev->dev_private;
6792
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6793
	int pipe = intel_crtc->pipe;
6794
	u32 dpll = I915_READ(DPLL(pipe));
6795
	u32 fp;
6796
	intel_clock_t clock;
2327 Serge 6797
 
2330 Serge 6798
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
6799
		fp = I915_READ(FP0(pipe));
6800
	else
6801
		fp = I915_READ(FP1(pipe));
2327 Serge 6802
 
2330 Serge 6803
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
6804
	if (IS_PINEVIEW(dev)) {
6805
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
6806
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
6807
	} else {
6808
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
6809
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
6810
	}
2327 Serge 6811
 
2330 Serge 6812
	if (!IS_GEN2(dev)) {
6813
		if (IS_PINEVIEW(dev))
6814
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
6815
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
6816
		else
6817
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
6818
			       DPLL_FPA01_P1_POST_DIV_SHIFT);
2327 Serge 6819
 
2330 Serge 6820
		switch (dpll & DPLL_MODE_MASK) {
6821
		case DPLLB_MODE_DAC_SERIAL:
6822
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
6823
				5 : 10;
6824
			break;
6825
		case DPLLB_MODE_LVDS:
6826
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
6827
				7 : 14;
6828
			break;
6829
		default:
6830
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
6831
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
6832
			return 0;
6833
		}
2327 Serge 6834
 
2330 Serge 6835
		/* XXX: Handle the 100MHz refclk */
6836
		intel_clock(dev, 96000, &clock);
6837
	} else {
6838
		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
2327 Serge 6839
 
2330 Serge 6840
		if (is_lvds) {
6841
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
6842
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
6843
			clock.p2 = 14;
2327 Serge 6844
 
2330 Serge 6845
			if ((dpll & PLL_REF_INPUT_MASK) ==
6846
			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
6847
				/* XXX: might not be 66MHz */
6848
				intel_clock(dev, 66000, &clock);
6849
			} else
6850
				intel_clock(dev, 48000, &clock);
6851
		} else {
6852
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
6853
				clock.p1 = 2;
6854
			else {
6855
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
6856
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
6857
			}
6858
			if (dpll & PLL_P2_DIVIDE_BY_4)
6859
				clock.p2 = 4;
6860
			else
6861
				clock.p2 = 2;
2327 Serge 6862
 
2330 Serge 6863
			intel_clock(dev, 48000, &clock);
6864
		}
6865
	}
2327 Serge 6866
 
2330 Serge 6867
	/* XXX: It would be nice to validate the clocks, but we can't reuse
6868
	 * i830PllIsValid() because it relies on the xf86_config connector
6869
	 * configuration being accurate, which it isn't necessarily.
6870
	 */
2327 Serge 6871
 
2330 Serge 6872
	return clock.dot;
6873
}
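
/*
 * Note added for clarity: intel_clock(), called above, derives the output
 * fields of intel_clock_t from the divisors read back from the FP/DPLL
 * registers.  For the non-Pineview path this is roughly
 * m = 5 * (m1 + 2) + (m2 + 2), p = p1 * p2, vco = refclk * m / (n + 2),
 * dot = vco / p (see the clock helpers earlier in this file); the refclk
 * itself is only an estimate here, as the XXX comments point out.
 */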
2327 Serge 6874
 
2330 Serge 6875
/** Returns the currently programmed mode of the given pipe. */
6876
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
6877
					     struct drm_crtc *crtc)
6878
{
6879
	struct drm_i915_private *dev_priv = dev->dev_private;
6880
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3243 Serge 6881
	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
2330 Serge 6882
	struct drm_display_mode *mode;
3243 Serge 6883
	int htot = I915_READ(HTOTAL(cpu_transcoder));
6884
	int hsync = I915_READ(HSYNC(cpu_transcoder));
6885
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
6886
	int vsync = I915_READ(VSYNC(cpu_transcoder));
2327 Serge 6887
 
2330 Serge 6888
	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
6889
	if (!mode)
6890
		return NULL;
6891
 
6892
	mode->clock = intel_crtc_clock_get(dev, crtc);
6893
	mode->hdisplay = (htot & 0xffff) + 1;
6894
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
6895
	mode->hsync_start = (hsync & 0xffff) + 1;
6896
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
6897
	mode->vdisplay = (vtot & 0xffff) + 1;
6898
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
6899
	mode->vsync_start = (vsync & 0xffff) + 1;
6900
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
6901
 
6902
	drm_mode_set_name(mode);
6903
 
6904
	return mode;
6905
}
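
/*
 * Decoding example (added for clarity): each timing register packs the
 * "active/start" value in its low 16 bits and the "total/end" value in its
 * high 16 bits, both stored minus one.  A hypothetical HTOTAL readback of
 * 0x053f03ff therefore gives hdisplay = 0x3ff + 1 = 1024 and
 * htotal = 0x53f + 1 = 1344, the standard 1024x768 horizontal timing.
 */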
6906
 
2327 Serge 6907
static void intel_increase_pllclock(struct drm_crtc *crtc)
6908
{
6909
	struct drm_device *dev = crtc->dev;
6910
	drm_i915_private_t *dev_priv = dev->dev_private;
6911
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6912
	int pipe = intel_crtc->pipe;
6913
	int dpll_reg = DPLL(pipe);
6914
	int dpll;
6915
 
6916
	if (HAS_PCH_SPLIT(dev))
6917
		return;
6918
 
6919
	if (!dev_priv->lvds_downclock_avail)
6920
		return;
6921
 
6922
	dpll = I915_READ(dpll_reg);
6923
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
6924
		DRM_DEBUG_DRIVER("upclocking LVDS\n");
6925
 
3031 serge 6926
		assert_panel_unlocked(dev_priv, pipe);
2327 Serge 6927
 
6928
		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
6929
		I915_WRITE(dpll_reg, dpll);
6930
		intel_wait_for_vblank(dev, pipe);
6931
 
6932
		dpll = I915_READ(dpll_reg);
6933
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
6934
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
6935
	}
6936
}
6937
 
3031 serge 6938
static void intel_decrease_pllclock(struct drm_crtc *crtc)
6939
{
6940
	struct drm_device *dev = crtc->dev;
6941
	drm_i915_private_t *dev_priv = dev->dev_private;
6942
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2327 Serge 6943
 
3031 serge 6944
	if (HAS_PCH_SPLIT(dev))
6945
		return;
2327 Serge 6946
 
3031 serge 6947
	if (!dev_priv->lvds_downclock_avail)
6948
		return;
2327 Serge 6949
 
3031 serge 6950
	/*
6951
	 * Since this is called by a timer, we should never get here in
6952
	 * the manual case.
6953
	 */
6954
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
6955
		int pipe = intel_crtc->pipe;
6956
		int dpll_reg = DPLL(pipe);
6957
		int dpll;
2327 Serge 6958
 
3031 serge 6959
		DRM_DEBUG_DRIVER("downclocking LVDS\n");
2327 Serge 6960
 
3031 serge 6961
		assert_panel_unlocked(dev_priv, pipe);
2327 Serge 6962
 
3031 serge 6963
		dpll = I915_READ(dpll_reg);
6964
		dpll |= DISPLAY_RATE_SELECT_FPA1;
6965
		I915_WRITE(dpll_reg, dpll);
6966
		intel_wait_for_vblank(dev, pipe);
6967
		dpll = I915_READ(dpll_reg);
6968
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
6969
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
6970
	}
2327 Serge 6971
 
3031 serge 6972
}
2327 Serge 6973
 
3031 serge 6974
void intel_mark_busy(struct drm_device *dev)
6975
{
6976
	i915_update_gfx_val(dev->dev_private);
6977
}
2327 Serge 6978
 
3031 serge 6979
void intel_mark_idle(struct drm_device *dev)
6980
{
6981
}
2327 Serge 6982
 
3031 serge 6983
void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
6984
{
6985
	struct drm_device *dev = obj->base.dev;
6986
	struct drm_crtc *crtc;
2327 Serge 6987
 
3263 Serge 6988
    ENTER();
6989
 
3031 serge 6990
	if (!i915_powersave)
6991
		return;
2327 Serge 6992
 
3031 serge 6993
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
6994
		if (!crtc->fb)
6995
			continue;
2327 Serge 6996
 
3031 serge 6997
		if (to_intel_framebuffer(crtc->fb)->obj == obj)
6998
			intel_increase_pllclock(crtc);
6999
	}
7000
}
2327 Serge 7001
 
3031 serge 7002
void intel_mark_fb_idle(struct drm_i915_gem_object *obj)
7003
{
7004
	struct drm_device *dev = obj->base.dev;
7005
	struct drm_crtc *crtc;
2327 Serge 7006
 
3031 serge 7007
	if (!i915_powersave)
7008
		return;
2327 Serge 7009
 
3031 serge 7010
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7011
		if (!crtc->fb)
7012
			continue;
2327 Serge 7013
 
3031 serge 7014
		if (to_intel_framebuffer(crtc->fb)->obj == obj)
7015
			intel_decrease_pllclock(crtc);
7016
	}
7017
}
2327 Serge 7018
 
2330 Serge 7019
static void intel_crtc_destroy(struct drm_crtc *crtc)
7020
{
7021
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7022
	struct drm_device *dev = crtc->dev;
7023
	struct intel_unpin_work *work;
7024
	unsigned long flags;
2327 Serge 7025
 
2330 Serge 7026
	spin_lock_irqsave(&dev->event_lock, flags);
7027
	work = intel_crtc->unpin_work;
7028
	intel_crtc->unpin_work = NULL;
7029
	spin_unlock_irqrestore(&dev->event_lock, flags);
2327 Serge 7030
 
2330 Serge 7031
	if (work) {
7032
//		cancel_work_sync(&work->work);
7033
		kfree(work);
7034
	}
2327 Serge 7035
 
2330 Serge 7036
	drm_crtc_cleanup(crtc);
2327 Serge 7037
 
2330 Serge 7038
	kfree(intel_crtc);
7039
}
2327 Serge 7040
 
3031 serge 7041
#if 0
7042
static void intel_unpin_work_fn(struct work_struct *__work)
7043
{
7044
	struct intel_unpin_work *work =
7045
		container_of(__work, struct intel_unpin_work, work);
3243 Serge 7046
	struct drm_device *dev = work->crtc->dev;
2327 Serge 7047
 
3243 Serge 7048
	mutex_lock(&dev->struct_mutex);
3031 serge 7049
	intel_unpin_fb_obj(work->old_fb_obj);
7050
	drm_gem_object_unreference(&work->pending_flip_obj->base);
7051
	drm_gem_object_unreference(&work->old_fb_obj->base);
2327 Serge 7052
 
3243 Serge 7053
	intel_update_fbc(dev);
7054
	mutex_unlock(&dev->struct_mutex);
7055
 
7056
	BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
7057
	atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
7058
 
3031 serge 7059
	kfree(work);
7060
}
2327 Serge 7061
 
3031 serge 7062
static void do_intel_finish_page_flip(struct drm_device *dev,
7063
				      struct drm_crtc *crtc)
7064
{
7065
	drm_i915_private_t *dev_priv = dev->dev_private;
7066
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7067
	struct intel_unpin_work *work;
7068
	struct drm_i915_gem_object *obj;
7069
	unsigned long flags;
2327 Serge 7070
 
3031 serge 7071
	/* Ignore early vblank irqs */
7072
	if (intel_crtc == NULL)
7073
		return;
2327 Serge 7074
 
3031 serge 7075
	spin_lock_irqsave(&dev->event_lock, flags);
7076
	work = intel_crtc->unpin_work;
3243 Serge 7077
 
7078
	/* Ensure we don't miss a work->pending update ... */
7079
	smp_rmb();
7080
 
7081
	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
3031 serge 7082
		spin_unlock_irqrestore(&dev->event_lock, flags);
7083
		return;
7084
	}
2327 Serge 7085
 
3243 Serge 7086
	/* and that the unpin work is consistent wrt ->pending. */
7087
	smp_rmb();
7088
 
3031 serge 7089
	intel_crtc->unpin_work = NULL;
2327 Serge 7090
 
3243 Serge 7091
	if (work->event)
7092
		drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
2327 Serge 7093
 
3031 serge 7094
	drm_vblank_put(dev, intel_crtc->pipe);
2327 Serge 7095
 
3031 serge 7096
	spin_unlock_irqrestore(&dev->event_lock, flags);
2327 Serge 7097
 
3031 serge 7098
	obj = work->old_fb_obj;
2327 Serge 7099
 
3031 serge 7100
	atomic_clear_mask(1 << intel_crtc->plane,
7101
			  &obj->pending_flip.counter);
7102
	wake_up(&dev_priv->pending_flip_queue);
2327 Serge 7103
 
3243 Serge 7104
	queue_work(dev_priv->wq, &work->work);
7105
 
3031 serge 7106
	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
7107
}
2327 Serge 7108
 
3031 serge 7109
void intel_finish_page_flip(struct drm_device *dev, int pipe)
7110
{
7111
	drm_i915_private_t *dev_priv = dev->dev_private;
7112
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2327 Serge 7113
 
3031 serge 7114
	do_intel_finish_page_flip(dev, crtc);
7115
}
2327 Serge 7116
 
3031 serge 7117
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
7118
{
7119
	drm_i915_private_t *dev_priv = dev->dev_private;
7120
	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
2327 Serge 7121
 
3031 serge 7122
	do_intel_finish_page_flip(dev, crtc);
7123
}
2327 Serge 7124
 
3031 serge 7125
void intel_prepare_page_flip(struct drm_device *dev, int plane)
7126
{
7127
	drm_i915_private_t *dev_priv = dev->dev_private;
7128
	struct intel_crtc *intel_crtc =
7129
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
7130
	unsigned long flags;
2327 Serge 7131
 
3243 Serge 7132
	/* NB: An MMIO update of the plane base pointer will also
7133
	 * generate a page-flip completion irq, i.e. every modeset
7134
	 * is also accompanied by a spurious intel_prepare_page_flip().
7135
	 */
3031 serge 7136
	spin_lock_irqsave(&dev->event_lock, flags);
3243 Serge 7137
	if (intel_crtc->unpin_work)
7138
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
3031 serge 7139
	spin_unlock_irqrestore(&dev->event_lock, flags);
7140
}
2327 Serge 7141
 
3243 Serge 7142
static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
7143
{
7144
	/* Ensure that the work item is consistent when activating it ... */
7145
	smp_wmb();
7146
	atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
7147
	/* and that it is marked active as soon as the irq could fire. */
7148
	smp_wmb();
7149
}
7150
 
3031 serge 7151
static int intel_gen2_queue_flip(struct drm_device *dev,
7152
				 struct drm_crtc *crtc,
7153
				 struct drm_framebuffer *fb,
7154
				 struct drm_i915_gem_object *obj)
7155
{
7156
	struct drm_i915_private *dev_priv = dev->dev_private;
7157
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7158
	u32 flip_mask;
7159
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
7160
	int ret;
2327 Serge 7161
 
3031 serge 7162
	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7163
	if (ret)
7164
		goto err;
2327 Serge 7165
 
3031 serge 7166
	ret = intel_ring_begin(ring, 6);
7167
	if (ret)
7168
		goto err_unpin;
2327 Serge 7169
 
3031 serge 7170
	/* Can't queue multiple flips, so wait for the previous
7171
	 * one to finish before executing the next.
7172
	 */
7173
	if (intel_crtc->plane)
7174
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
7175
	else
7176
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
7177
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
7178
	intel_ring_emit(ring, MI_NOOP);
7179
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
7180
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7181
	intel_ring_emit(ring, fb->pitches[0]);
7182
	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
7183
	intel_ring_emit(ring, 0); /* aux display base address, unused */
3243 Serge 7184
 
7185
	intel_mark_page_flip_active(intel_crtc);
3031 serge 7186
	intel_ring_advance(ring);
7187
	return 0;
2327 Serge 7188
 
3031 serge 7189
err_unpin:
7190
	intel_unpin_fb_obj(obj);
7191
err:
7192
	return ret;
7193
}
2327 Serge 7194
 
3031 serge 7195
static int intel_gen3_queue_flip(struct drm_device *dev,
7196
				 struct drm_crtc *crtc,
7197
				 struct drm_framebuffer *fb,
7198
				 struct drm_i915_gem_object *obj)
7199
{
7200
	struct drm_i915_private *dev_priv = dev->dev_private;
7201
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7202
	u32 flip_mask;
7203
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
7204
	int ret;
2327 Serge 7205
 
3031 serge 7206
	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7207
	if (ret)
7208
		goto err;
2327 Serge 7209
 
3031 serge 7210
	ret = intel_ring_begin(ring, 6);
7211
	if (ret)
7212
		goto err_unpin;
2327 Serge 7213
 
3031 serge 7214
	if (intel_crtc->plane)
7215
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
7216
	else
7217
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
7218
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
7219
	intel_ring_emit(ring, MI_NOOP);
7220
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
7221
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7222
	intel_ring_emit(ring, fb->pitches[0]);
7223
	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
7224
	intel_ring_emit(ring, MI_NOOP);
2327 Serge 7225
 
3243 Serge 7226
	intel_mark_page_flip_active(intel_crtc);
3031 serge 7227
	intel_ring_advance(ring);
7228
	return 0;
2327 Serge 7229
 
3031 serge 7230
err_unpin:
7231
	intel_unpin_fb_obj(obj);
7232
err:
7233
	return ret;
7234
}
2327 Serge 7235
 
3031 serge 7236
static int intel_gen4_queue_flip(struct drm_device *dev,
7237
				 struct drm_crtc *crtc,
7238
				 struct drm_framebuffer *fb,
7239
				 struct drm_i915_gem_object *obj)
7240
{
7241
	struct drm_i915_private *dev_priv = dev->dev_private;
7242
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7243
	uint32_t pf, pipesrc;
7244
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
7245
	int ret;
2327 Serge 7246
 
3031 serge 7247
	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7248
	if (ret)
7249
		goto err;
2327 Serge 7250
 
3031 serge 7251
	ret = intel_ring_begin(ring, 4);
7252
	if (ret)
7253
		goto err_unpin;
2327 Serge 7254
 
3031 serge 7255
	/* i965+ uses the linear or tiled offsets from the
7256
	 * Display Registers (which do not change across a page-flip)
7257
	 * so we need only reprogram the base address.
7258
	 */
7259
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
7260
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7261
	intel_ring_emit(ring, fb->pitches[0]);
7262
	intel_ring_emit(ring,
7263
			(obj->gtt_offset + intel_crtc->dspaddr_offset) |
7264
			obj->tiling_mode);
2327 Serge 7265
 
3031 serge 7266
	/* XXX Enabling the panel-fitter across page-flip is so far
7267
	 * untested on non-native modes, so ignore it for now.
7268
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
7269
	 */
7270
	pf = 0;
7271
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
7272
	intel_ring_emit(ring, pf | pipesrc);
3243 Serge 7273
 
7274
	intel_mark_page_flip_active(intel_crtc);
3031 serge 7275
	intel_ring_advance(ring);
7276
	return 0;
2327 Serge 7277
 
3031 serge 7278
err_unpin:
7279
	intel_unpin_fb_obj(obj);
7280
err:
7281
	return ret;
7282
}
2327 Serge 7283
 
3031 serge 7284
static int intel_gen6_queue_flip(struct drm_device *dev,
7285
				 struct drm_crtc *crtc,
7286
				 struct drm_framebuffer *fb,
7287
				 struct drm_i915_gem_object *obj)
7288
{
7289
	struct drm_i915_private *dev_priv = dev->dev_private;
7290
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7291
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
7292
	uint32_t pf, pipesrc;
7293
	int ret;
2327 Serge 7294
 
3031 serge 7295
	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7296
	if (ret)
7297
		goto err;
2327 Serge 7298
 
3031 serge 7299
	ret = intel_ring_begin(ring, 4);
7300
	if (ret)
7301
		goto err_unpin;
2327 Serge 7302
 
3031 serge 7303
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
7304
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7305
	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
7306
	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
2327 Serge 7307
 
3031 serge 7308
	/* Contrary to the suggestions in the documentation,
7309
	 * "Enable Panel Fitter" does not seem to be required when page
7310
	 * flipping with a non-native mode, and worse causes a normal
7311
	 * modeset to fail.
7312
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
7313
	 */
7314
	pf = 0;
7315
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
7316
	intel_ring_emit(ring, pf | pipesrc);
3243 Serge 7317
 
7318
	intel_mark_page_flip_active(intel_crtc);
3031 serge 7319
	intel_ring_advance(ring);
7320
	return 0;
2327 Serge 7321
 
3031 serge 7322
err_unpin:
7323
	intel_unpin_fb_obj(obj);
7324
err:
7325
	return ret;
7326
}
2327 Serge 7327
 
3031 serge 7328
/*
7329
 * On gen7 we currently use the blit ring because (in early silicon at least)
7330
 * the render ring doesn't give us interrupts for page flip completion, which
7331
 * means clients will hang after the first flip is queued.  Fortunately the
7332
 * blit ring generates interrupts properly, so use it instead.
7333
 */
7334
static int intel_gen7_queue_flip(struct drm_device *dev,
7335
				 struct drm_crtc *crtc,
7336
				 struct drm_framebuffer *fb,
7337
				 struct drm_i915_gem_object *obj)
7338
{
7339
	struct drm_i915_private *dev_priv = dev->dev_private;
7340
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7341
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
7342
	uint32_t plane_bit = 0;
7343
	int ret;
2327 Serge 7344
 
3031 serge 7345
	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7346
	if (ret)
7347
		goto err;
2327 Serge 7348
 
3031 serge 7349
	switch(intel_crtc->plane) {
7350
	case PLANE_A:
7351
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
7352
		break;
7353
	case PLANE_B:
7354
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
7355
		break;
7356
	case PLANE_C:
7357
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
7358
		break;
7359
	default:
7360
		WARN_ONCE(1, "unknown plane in flip command\n");
7361
		ret = -ENODEV;
7362
		goto err_unpin;
7363
	}
2327 Serge 7364
 
3031 serge 7365
	ret = intel_ring_begin(ring, 4);
7366
	if (ret)
7367
		goto err_unpin;
2327 Serge 7368
 
3031 serge 7369
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
7370
	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
7371
	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
7372
	intel_ring_emit(ring, (MI_NOOP));
3243 Serge 7373
 
7374
	intel_mark_page_flip_active(intel_crtc);
3031 serge 7375
	intel_ring_advance(ring);
7376
	return 0;
2327 Serge 7377
 
3031 serge 7378
err_unpin:
7379
	intel_unpin_fb_obj(obj);
7380
err:
7381
	return ret;
7382
}
2327 Serge 7383
 
3031 serge 7384
static int intel_default_queue_flip(struct drm_device *dev,
7385
				    struct drm_crtc *crtc,
7386
				    struct drm_framebuffer *fb,
7387
				    struct drm_i915_gem_object *obj)
7388
{
7389
	return -ENODEV;
7390
}
2327 Serge 7391
 
3031 serge 7392
static int intel_crtc_page_flip(struct drm_crtc *crtc,
7393
				struct drm_framebuffer *fb,
7394
				struct drm_pending_vblank_event *event)
7395
{
7396
	struct drm_device *dev = crtc->dev;
7397
	struct drm_i915_private *dev_priv = dev->dev_private;
7398
	struct intel_framebuffer *intel_fb;
7399
	struct drm_i915_gem_object *obj;
7400
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7401
	struct intel_unpin_work *work;
7402
	unsigned long flags;
7403
	int ret;
2327 Serge 7404
 
3031 serge 7405
	/* Can't change pixel format via MI display flips. */
7406
	if (fb->pixel_format != crtc->fb->pixel_format)
7407
		return -EINVAL;
2327 Serge 7408
 
3031 serge 7409
	/*
7410
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
7411
	 * Note that pitch changes could also affect these registers.
7412
	 */
7413
	if (INTEL_INFO(dev)->gen > 3 &&
7414
	    (fb->offsets[0] != crtc->fb->offsets[0] ||
7415
	     fb->pitches[0] != crtc->fb->pitches[0]))
7416
		return -EINVAL;
2327 Serge 7417
 
3031 serge 7418
	work = kzalloc(sizeof *work, GFP_KERNEL);
7419
	if (work == NULL)
7420
		return -ENOMEM;
2327 Serge 7421
 
3031 serge 7422
	work->event = event;
3243 Serge 7423
	work->crtc = crtc;
3031 serge 7424
	intel_fb = to_intel_framebuffer(crtc->fb);
7425
	work->old_fb_obj = intel_fb->obj;
7426
	INIT_WORK(&work->work, intel_unpin_work_fn);
2327 Serge 7427
 
3031 serge 7428
	ret = drm_vblank_get(dev, intel_crtc->pipe);
7429
	if (ret)
7430
		goto free_work;
2327 Serge 7431
 
3031 serge 7432
	/* We borrow the event spin lock for protecting unpin_work */
7433
	spin_lock_irqsave(&dev->event_lock, flags);
7434
	if (intel_crtc->unpin_work) {
7435
		spin_unlock_irqrestore(&dev->event_lock, flags);
7436
		kfree(work);
7437
		drm_vblank_put(dev, intel_crtc->pipe);
2327 Serge 7438
 
3031 serge 7439
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
7440
		return -EBUSY;
7441
	}
7442
	intel_crtc->unpin_work = work;
7443
	spin_unlock_irqrestore(&dev->event_lock, flags);
2327 Serge 7444
 
3031 serge 7445
	intel_fb = to_intel_framebuffer(fb);
7446
	obj = intel_fb->obj;
2327 Serge 7447
 
3243 Serge 7448
	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
7449
		flush_workqueue(dev_priv->wq);
7450
 
3031 serge 7451
	ret = i915_mutex_lock_interruptible(dev);
7452
	if (ret)
7453
		goto cleanup;
2327 Serge 7454
 
3031 serge 7455
	/* Reference the objects for the scheduled work. */
7456
	drm_gem_object_reference(&work->old_fb_obj->base);
7457
	drm_gem_object_reference(&obj->base);
2327 Serge 7458
 
3031 serge 7459
	crtc->fb = fb;
2327 Serge 7460
 
3031 serge 7461
	work->pending_flip_obj = obj;
2327 Serge 7462
 
3031 serge 7463
	work->enable_stall_check = true;
7464
 
7465
	/* Block clients from rendering to the new back buffer until
7466
	 * the flip occurs and the object is no longer visible.
7467
	 */
7468
	atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
3243 Serge 7469
	atomic_inc(&intel_crtc->unpin_work_count);
3031 serge 7470
 
7471
	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
7472
	if (ret)
7473
		goto cleanup_pending;
7474
 
7475
	intel_disable_fbc(dev);
7476
	intel_mark_fb_busy(obj);
7477
	mutex_unlock(&dev->struct_mutex);
7478
 
7479
	trace_i915_flip_request(intel_crtc->plane, obj);
7480
 
7481
	return 0;
7482
 
7483
cleanup_pending:
3243 Serge 7484
	atomic_dec(&intel_crtc->unpin_work_count);
3031 serge 7485
	atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
7486
	drm_gem_object_unreference(&work->old_fb_obj->base);
7487
	drm_gem_object_unreference(&obj->base);
7488
	mutex_unlock(&dev->struct_mutex);
7489
 
7490
cleanup:
7491
	spin_lock_irqsave(&dev->event_lock, flags);
7492
	intel_crtc->unpin_work = NULL;
7493
	spin_unlock_irqrestore(&dev->event_lock, flags);
7494
 
7495
	drm_vblank_put(dev, intel_crtc->pipe);
7496
free_work:
7497
	kfree(work);
7498
 
7499
	return ret;
7500
}
7501
 
7502
#endif
7503
 
7504
static struct drm_crtc_helper_funcs intel_helper_funcs = {
7505
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
7506
	.load_lut = intel_crtc_load_lut,
7507
	.disable = intel_crtc_noop,
7508
};
7509
 
7510
bool intel_encoder_check_is_cloned(struct intel_encoder *encoder)
2330 Serge 7511
{
3031 serge 7512
	struct intel_encoder *other_encoder;
7513
	struct drm_crtc *crtc = &encoder->new_crtc->base;
2327 Serge 7514
 
3031 serge 7515
	if (WARN_ON(!crtc))
7516
		return false;
2327 Serge 7517
 
3031 serge 7518
	list_for_each_entry(other_encoder,
7519
			    &crtc->dev->mode_config.encoder_list,
7520
			    base.head) {
7521
 
7522
		if (&other_encoder->new_crtc->base != crtc ||
7523
		    encoder == other_encoder)
7524
			continue;
7525
		else
7526
			return true;
7527
	}
7528
 
7529
	return false;
7530
}
7531
 
7532
static bool intel_encoder_crtc_ok(struct drm_encoder *encoder,
7533
				  struct drm_crtc *crtc)
7534
{
7535
	struct drm_device *dev;
7536
	struct drm_crtc *tmp;
7537
	int crtc_mask = 1;
7538
 
7539
	WARN(!crtc, "checking null crtc?\n");
7540
 
7541
	dev = crtc->dev;
7542
 
7543
	list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
7544
		if (tmp == crtc)
7545
			break;
7546
		crtc_mask <<= 1;
7547
	}
7548
 
7549
	if (encoder->possible_crtcs & crtc_mask)
7550
		return true;
7551
	return false;
7552
}
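
/*
 * Example (added for clarity): if the crtc passed in is the third entry in
 * dev->mode_config.crtc_list, crtc_mask ends up as 1 << 2, so the encoder
 * is only considered compatible when bit 2 is set in its possible_crtcs
 * bitmask.
 */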
7553
 
7554
/**
7555
 * intel_modeset_update_staged_output_state
7556
 *
7557
 * Updates the staged output configuration state, e.g. after we've read out the
7558
 * current hw state.
7559
 */
7560
static void intel_modeset_update_staged_output_state(struct drm_device *dev)
7561
{
7562
	struct intel_encoder *encoder;
7563
	struct intel_connector *connector;
7564
 
7565
	list_for_each_entry(connector, &dev->mode_config.connector_list,
7566
			    base.head) {
7567
		connector->new_encoder =
7568
			to_intel_encoder(connector->base.encoder);
7569
	}
7570
 
7571
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7572
			    base.head) {
7573
		encoder->new_crtc =
7574
			to_intel_crtc(encoder->base.crtc);
7575
	}
7576
}
7577
 
7578
/**
7579
 * intel_modeset_commit_output_state
7580
 *
7581
 * This function copies the staged display pipe configuration to the real one.
7582
 */
7583
static void intel_modeset_commit_output_state(struct drm_device *dev)
7584
{
7585
	struct intel_encoder *encoder;
7586
	struct intel_connector *connector;
7587
 
7588
	list_for_each_entry(connector, &dev->mode_config.connector_list,
7589
			    base.head) {
7590
		connector->base.encoder = &connector->new_encoder->base;
7591
	}
7592
 
7593
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7594
			    base.head) {
7595
		encoder->base.crtc = &encoder->new_crtc->base;
7596
	}
7597
}
7598
 
7599
static struct drm_display_mode *
7600
intel_modeset_adjusted_mode(struct drm_crtc *crtc,
7601
			    struct drm_display_mode *mode)
7602
{
7603
	struct drm_device *dev = crtc->dev;
7604
	struct drm_display_mode *adjusted_mode;
7605
	struct drm_encoder_helper_funcs *encoder_funcs;
7606
	struct intel_encoder *encoder;
7607
 
7608
	adjusted_mode = drm_mode_duplicate(dev, mode);
7609
	if (!adjusted_mode)
7610
		return ERR_PTR(-ENOMEM);
7611
 
7612
	/* Pass our mode to the connectors and the CRTC to give them a chance to
7613
	 * adjust it according to limitations or connector properties, and also
7614
	 * a chance to reject the mode entirely.
2330 Serge 7615
	 */
3031 serge 7616
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7617
			    base.head) {
2327 Serge 7618
 
3031 serge 7619
		if (&encoder->new_crtc->base != crtc)
7620
			continue;
7621
		encoder_funcs = encoder->base.helper_private;
7622
		if (!(encoder_funcs->mode_fixup(&encoder->base, mode,
7623
						adjusted_mode))) {
7624
			DRM_DEBUG_KMS("Encoder fixup failed\n");
7625
			goto fail;
7626
		}
7627
	}
2327 Serge 7628
 
3031 serge 7629
	if (!(intel_crtc_mode_fixup(crtc, mode, adjusted_mode))) {
7630
		DRM_DEBUG_KMS("CRTC fixup failed\n");
7631
		goto fail;
7632
	}
7633
	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
2327 Serge 7634
 
3031 serge 7635
	return adjusted_mode;
7636
fail:
7637
	drm_mode_destroy(dev, adjusted_mode);
7638
	return ERR_PTR(-EINVAL);
7639
}
2327 Serge 7640
 
3031 serge 7641
/* Computes which crtcs are affected and sets the relevant bits in the mask. For
7642
 * simplicity we use the crtc's pipe number (because it's easier to obtain). */
7643
static void
7644
intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
7645
			     unsigned *prepare_pipes, unsigned *disable_pipes)
7646
{
7647
	struct intel_crtc *intel_crtc;
7648
	struct drm_device *dev = crtc->dev;
7649
	struct intel_encoder *encoder;
7650
	struct intel_connector *connector;
7651
	struct drm_crtc *tmp_crtc;
7652
 
7653
	*disable_pipes = *modeset_pipes = *prepare_pipes = 0;
7654
 
7655
	/* Check which crtcs have changed outputs connected to them; these need
7656
	 * to be part of the prepare_pipes mask. We don't (yet) support global
7657
	 * modeset across multiple crtcs, so modeset_pipes will only have one
7658
	 * bit set at most. */
7659
	list_for_each_entry(connector, &dev->mode_config.connector_list,
7660
			    base.head) {
7661
		if (connector->base.encoder == &connector->new_encoder->base)
7662
			continue;
7663
 
7664
		if (connector->base.encoder) {
7665
			tmp_crtc = connector->base.encoder->crtc;
7666
 
7667
			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
7668
		}
7669
 
7670
		if (connector->new_encoder)
7671
			*prepare_pipes |=
7672
				1 << connector->new_encoder->new_crtc->pipe;
7673
	}
7674
 
7675
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7676
			    base.head) {
7677
		if (encoder->base.crtc == &encoder->new_crtc->base)
7678
			continue;
7679
 
7680
		if (encoder->base.crtc) {
7681
			tmp_crtc = encoder->base.crtc;
7682
 
7683
			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
7684
		}
7685
 
7686
		if (encoder->new_crtc)
7687
			*prepare_pipes |= 1 << encoder->new_crtc->pipe;
7688
	}
7689
 
7690
	/* Check for any pipes that will be fully disabled ... */
7691
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
7692
			    base.head) {
7693
		bool used = false;
7694
 
7695
		/* Don't try to disable disabled crtcs. */
7696
		if (!intel_crtc->base.enabled)
7697
			continue;
7698
 
7699
		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7700
				    base.head) {
7701
			if (encoder->new_crtc == intel_crtc)
7702
				used = true;
7703
		}
7704
 
7705
		if (!used)
7706
			*disable_pipes |= 1 << intel_crtc->pipe;
7707
	}
7708
 
7709
 
7710
	/* set_mode is also used to update properties on live display pipes. */
7711
	intel_crtc = to_intel_crtc(crtc);
7712
	if (crtc->enabled)
7713
		*prepare_pipes |= 1 << intel_crtc->pipe;
7714
 
7715
	/* We only support modeset on one single crtc, hence we need to do that
7716
	 * only for the passed-in crtc iff we change anything other than just
7718
	 * disabling crtcs.
7718
	 *
7719
	 * This is actually not true: to be fully compatible with the old crtc
7720
	 * helper we automatically disable _any_ output (i.e. doesn't need to be
7721
	 * connected to the crtc we're modesetting on) if it's disconnected.
7722
	 * Which is a rather nutty API (since changing the output configuration
7723
	 * without userspace's explicit request can lead to confusion), but
7724
	 * alas. Hence we currently need to modeset on all pipes we prepare. */
7725
	if (*prepare_pipes)
7726
		*modeset_pipes = *prepare_pipes;
7727
 
7728
	/* ... and mask these out. */
7729
	*modeset_pipes &= ~(*disable_pipes);
7730
	*prepare_pipes &= ~(*disable_pipes);
2330 Serge 7731
}
2327 Serge 7732
 
3031 serge 7733
static bool intel_crtc_in_use(struct drm_crtc *crtc)
2330 Serge 7734
{
3031 serge 7735
	struct drm_encoder *encoder;
2330 Serge 7736
	struct drm_device *dev = crtc->dev;
2327 Serge 7737
 
3031 serge 7738
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
7739
		if (encoder->crtc == crtc)
7740
			return true;
7741
 
7742
	return false;
7743
}
7744
 
7745
static void
7746
intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
7747
{
7748
	struct intel_encoder *intel_encoder;
7749
	struct intel_crtc *intel_crtc;
7750
	struct drm_connector *connector;
7751
 
7752
	list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
7753
			    base.head) {
7754
		if (!intel_encoder->base.crtc)
7755
			continue;
7756
 
7757
		intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
7758
 
7759
		if (prepare_pipes & (1 << intel_crtc->pipe))
7760
			intel_encoder->connectors_active = false;
7761
	}
7762
 
7763
	intel_modeset_commit_output_state(dev);
7764
 
7765
	/* Update computed state. */
7766
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
7767
			    base.head) {
7768
		intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
7769
	}
7770
 
7771
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
7772
		if (!connector->encoder || !connector->encoder->crtc)
7773
			continue;
7774
 
7775
		intel_crtc = to_intel_crtc(connector->encoder->crtc);
7776
 
7777
		if (prepare_pipes & (1 << intel_crtc->pipe)) {
7778
			struct drm_property *dpms_property =
7779
				dev->mode_config.dpms_property;
7780
 
7781
			connector->dpms = DRM_MODE_DPMS_ON;
3243 Serge 7782
			drm_object_property_set_value(&connector->base,
3031 serge 7783
							 dpms_property,
7784
							 DRM_MODE_DPMS_ON);
7785
 
7786
			intel_encoder = to_intel_encoder(connector->encoder);
7787
			intel_encoder->connectors_active = true;
7788
		}
7789
	}
7790
 
7791
}
7792
 
7793
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
7794
	list_for_each_entry((intel_crtc), \
7795
			    &(dev)->mode_config.crtc_list, \
7796
			    base.head) \
7797
		if (mask & (1 <<(intel_crtc)->pipe)) \
7798
 
7799
void
7800
intel_modeset_check_state(struct drm_device *dev)
7801
{
7802
	struct intel_crtc *crtc;
7803
	struct intel_encoder *encoder;
7804
	struct intel_connector *connector;
7805
 
7806
	list_for_each_entry(connector, &dev->mode_config.connector_list,
7807
			    base.head) {
7808
		/* This also checks the encoder/connector hw state with the
7809
		 * ->get_hw_state callbacks. */
7810
		intel_connector_check_state(connector);
7811
 
7812
		WARN(&connector->new_encoder->base != connector->base.encoder,
7813
		     "connector's staged encoder doesn't match current encoder\n");
7814
	}
7815
 
7816
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7817
			    base.head) {
7818
		bool enabled = false;
7819
		bool active = false;
7820
		enum pipe pipe, tracked_pipe;
7821
 
7822
		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
7823
			      encoder->base.base.id,
7824
			      drm_get_encoder_name(&encoder->base));
7825
 
7826
		WARN(&encoder->new_crtc->base != encoder->base.crtc,
7827
		     "encoder's stage crtc doesn't match current crtc\n");
7828
		WARN(encoder->connectors_active && !encoder->base.crtc,
7829
		     "encoder's active_connectors set, but no crtc\n");
7830
 
7831
		list_for_each_entry(connector, &dev->mode_config.connector_list,
7832
				    base.head) {
7833
			if (connector->base.encoder != &encoder->base)
7834
				continue;
7835
			enabled = true;
7836
			if (connector->base.dpms != DRM_MODE_DPMS_OFF)
7837
				active = true;
7838
		}
7839
		WARN(!!encoder->base.crtc != enabled,
7840
		     "encoder's enabled state mismatch "
7841
		     "(expected %i, found %i)\n",
7842
		     !!encoder->base.crtc, enabled);
7843
		WARN(active && !encoder->base.crtc,
7844
		     "active encoder with no crtc\n");
7845
 
7846
		WARN(encoder->connectors_active != active,
7847
		     "encoder's computed active state doesn't match tracked active state "
7848
		     "(expected %i, found %i)\n", active, encoder->connectors_active);
7849
 
7850
		active = encoder->get_hw_state(encoder, &pipe);
7851
		WARN(active != encoder->connectors_active,
7852
		     "encoder's hw state doesn't match sw tracking "
7853
		     "(expected %i, found %i)\n",
7854
		     encoder->connectors_active, active);
7855
 
7856
		if (!encoder->base.crtc)
7857
			continue;
7858
 
7859
		tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
7860
		WARN(active && pipe != tracked_pipe,
7861
		     "active encoder's pipe doesn't match"
7862
		     "(expected %i, found %i)\n",
7863
		     tracked_pipe, pipe);
7864
 
7865
	}
7866
 
7867
	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
7868
			    base.head) {
7869
		bool enabled = false;
7870
		bool active = false;
7871
 
7872
		DRM_DEBUG_KMS("[CRTC:%d]\n",
7873
			      crtc->base.base.id);
7874
 
7875
		WARN(crtc->active && !crtc->base.enabled,
7876
		     "active crtc, but not enabled in sw tracking\n");
7877
 
7878
		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7879
				    base.head) {
7880
			if (encoder->base.crtc != &crtc->base)
7881
				continue;
7882
			enabled = true;
7883
			if (encoder->connectors_active)
7884
				active = true;
7885
		}
7886
		WARN(active != crtc->active,
7887
		     "crtc's computed active state doesn't match tracked active state "
7888
		     "(expected %i, found %i)\n", active, crtc->active);
7889
		WARN(enabled != crtc->base.enabled,
7890
		     "crtc's computed enabled state doesn't match tracked enabled state "
7891
		     "(expected %i, found %i)\n", enabled, crtc->base.enabled);
7892
 
7893
		assert_pipe(dev->dev_private, crtc->pipe, crtc->active);
7894
	}
7895
}
7896
 
7897
bool intel_set_mode(struct drm_crtc *crtc,
7898
		    struct drm_display_mode *mode,
7899
		    int x, int y, struct drm_framebuffer *fb)
7900
{
7901
	struct drm_device *dev = crtc->dev;
7902
	drm_i915_private_t *dev_priv = dev->dev_private;
7903
	struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
7904
	struct intel_crtc *intel_crtc;
7905
	unsigned disable_pipes, prepare_pipes, modeset_pipes;
7906
	bool ret = true;
7907
 
7908
	intel_modeset_affected_pipes(crtc, &modeset_pipes,
7909
				     &prepare_pipes, &disable_pipes);
7910
 
7911
	DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
7912
		      modeset_pipes, prepare_pipes, disable_pipes);
7913
 
7914
	for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
7915
		intel_crtc_disable(&intel_crtc->base);
7916
 
7917
	saved_hwmode = crtc->hwmode;
7918
	saved_mode = crtc->mode;
7919
 
7920
	/* Hack: Because we don't (yet) support global modeset on multiple
7921
	 * crtcs, we don't keep track of the new mode for more than one crtc.
7922
	 * Hence simply check whether any bit is set in modeset_pipes in all the
7923
	 * pieces of code that are not yet converted to deal with multiple crtcs
7924
	 * changing their mode at the same time. */
7925
	adjusted_mode = NULL;
7926
	if (modeset_pipes) {
7927
		adjusted_mode = intel_modeset_adjusted_mode(crtc, mode);
7928
		if (IS_ERR(adjusted_mode)) {
7929
			return false;
7930
		}
7931
	}
7932
 
7933
	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
7934
		if (intel_crtc->base.enabled)
7935
			dev_priv->display.crtc_disable(&intel_crtc->base);
7936
	}
7937
 
7938
	/* crtc->mode is already used by the ->mode_set callbacks, hence we need
7939
	 * to set it here already, even though we also pass it down the callchain.
2330 Serge 7940
	 */
3031 serge 7941
	if (modeset_pipes)
7942
		crtc->mode = *mode;
2327 Serge 7943
 
3031 serge 7944
	/* Only after disabling all output pipelines that will be changed can we
7945
	 * update the output configuration. */
7946
	intel_modeset_update_state(dev, prepare_pipes);
7947
 
3243 Serge 7948
	if (dev_priv->display.modeset_global_resources)
7949
		dev_priv->display.modeset_global_resources(dev);
7950
 
3031 serge 7951
	/* Set up the DPLL and any encoders state that needs to adjust or depend
7952
	 * on the DPLL.
2330 Serge 7953
	 */
3031 serge 7954
	for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
7955
		ret = !intel_crtc_mode_set(&intel_crtc->base,
7956
					   mode, adjusted_mode,
7957
					   x, y, fb);
7958
		if (!ret)
7959
		    goto done;
7960
	}
7961
 
7962
	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
7963
	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
7964
		dev_priv->display.crtc_enable(&intel_crtc->base);
7965
 
7966
	if (modeset_pipes) {
7967
		/* Store real post-adjustment hardware mode. */
7968
		crtc->hwmode = *adjusted_mode;
7969
 
7970
		/* Calculate and store various constants which
7971
		 * are later needed by vblank and swap-completion
7972
		 * timestamping. They are derived from true hwmode.
7973
		 */
7974
		drm_calc_timestamping_constants(crtc);
7975
	}
7976
 
7977
	/* FIXME: add subpixel order */
7978
done:
7979
	drm_mode_destroy(dev, adjusted_mode);
7980
	if (!ret && crtc->enabled) {
7981
		crtc->hwmode = saved_hwmode;
7982
		crtc->mode = saved_mode;
7983
	} else {
7984
		intel_modeset_check_state(dev);
7985
	}
7986
 
7987
	return ret;
2330 Serge 7988
}
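
/*
 * Summary comment (added for clarity): intel_set_mode() above proceeds in
 * four broad steps -- compute the disable/prepare/modeset pipe masks,
 * shut down every affected pipe, commit the staged output state and
 * program the new mode via intel_crtc_mode_set(), then re-enable the
 * prepared pipes and cross-check the result with
 * intel_modeset_check_state().  If setting the mode fails, the saved mode
 * and hwmode are restored so the crtc still describes its previous
 * configuration.
 */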
2327 Serge 7989
 
3031 serge 7990
#undef for_each_intel_crtc_masked
2327 Serge 7991
 
3031 serge 7992
static void intel_set_config_free(struct intel_set_config *config)
7993
{
7994
	if (!config)
7995
		return;
7996
 
7997
	kfree(config->save_connector_encoders);
7998
	kfree(config->save_encoder_crtcs);
7999
	kfree(config);
8000
}
8001
 
8002
static int intel_set_config_save_state(struct drm_device *dev,
8003
				       struct intel_set_config *config)
8004
{
8005
	struct drm_encoder *encoder;
8006
	struct drm_connector *connector;
8007
	int count;
8008
 
8009
	config->save_encoder_crtcs =
8010
		kcalloc(dev->mode_config.num_encoder,
8011
			sizeof(struct drm_crtc *), GFP_KERNEL);
8012
	if (!config->save_encoder_crtcs)
8013
		return -ENOMEM;
8014
 
8015
	config->save_connector_encoders =
8016
		kcalloc(dev->mode_config.num_connector,
8017
			sizeof(struct drm_encoder *), GFP_KERNEL);
8018
	if (!config->save_connector_encoders)
8019
		return -ENOMEM;
8020
 
8021
	/* Copy data. Note that driver private data is not affected.
8022
	 * Should anything bad happen, only the expected state is
8023
	 * restored, not the driver's personal bookkeeping.
8024
	 */
8025
	count = 0;
8026
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
8027
		config->save_encoder_crtcs[count++] = encoder->crtc;
8028
	}
8029
 
8030
	count = 0;
8031
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
8032
		config->save_connector_encoders[count++] = connector->encoder;
8033
	}
8034
 
8035
	return 0;
8036
}
8037
 
8038
static void intel_set_config_restore_state(struct drm_device *dev,
8039
					   struct intel_set_config *config)
8040
{
8041
	struct intel_encoder *encoder;
8042
	struct intel_connector *connector;
8043
	int count;
8044
 
8045
	count = 0;
8046
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
8047
		encoder->new_crtc =
8048
			to_intel_crtc(config->save_encoder_crtcs[count++]);
8049
	}
8050
 
8051
	count = 0;
8052
	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
8053
		connector->new_encoder =
8054
			to_intel_encoder(config->save_connector_encoders[count++]);
8055
	}
8056
}
8057
 
8058
static void
8059
intel_set_config_compute_mode_changes(struct drm_mode_set *set,
8060
				      struct intel_set_config *config)
8061
{
8062
 
8063
	/* We should be able to check here if the fb has the same properties
8064
	 * and then just flip_or_move it */
8065
	if (set->crtc->fb != set->fb) {
8066
		/* If we have no fb then treat it as a full mode set */
8067
		if (set->crtc->fb == NULL) {
8068
			DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
8069
			config->mode_changed = true;
8070
		} else if (set->fb == NULL) {
8071
			config->mode_changed = true;
8072
		} else if (set->fb->depth != set->crtc->fb->depth) {
8073
			config->mode_changed = true;
8074
		} else if (set->fb->bits_per_pixel !=
8075
			   set->crtc->fb->bits_per_pixel) {
8076
			config->mode_changed = true;
8077
		} else
8078
			config->fb_changed = true;
8079
	}
8080
 
8081
	if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
8082
		config->fb_changed = true;
8083
 
8084
	if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
8085
		DRM_DEBUG_KMS("modes are different, full mode set\n");
8086
		drm_mode_debug_printmodeline(&set->crtc->mode);
8087
		drm_mode_debug_printmodeline(set->mode);
8088
		config->mode_changed = true;
8089
	}
8090
}
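
/*
 * Example of the distinction above (added for clarity): panning an
 * existing framebuffer (same fb, new x/y) only sets fb_changed, which the
 * set_config path below services with intel_pipe_set_base(); switching to
 * a framebuffer with a different depth or bpp, or requesting a different
 * mode, sets mode_changed and forces a full intel_set_mode() call.
 */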
8091
 
8092
static int
8093
intel_modeset_stage_output_state(struct drm_device *dev,
8094
				 struct drm_mode_set *set,
8095
				 struct intel_set_config *config)
8096
{
8097
	struct drm_crtc *new_crtc;
8098
	struct intel_connector *connector;
8099
	struct intel_encoder *encoder;
8100
	int count, ro;
8101
 
8102
	/* The upper layers ensure that we either disable a crtc or have a list
8103
	 * of connectors. For paranoia, double-check this. */
8104
	WARN_ON(!set->fb && (set->num_connectors != 0));
8105
	WARN_ON(set->fb && (set->num_connectors == 0));
8106
 
8107
	count = 0;
8108
	list_for_each_entry(connector, &dev->mode_config.connector_list,
8109
			    base.head) {
8110
		/* Otherwise traverse the passed-in connector list and get encoders
8111
		 * for them. */
8112
		for (ro = 0; ro < set->num_connectors; ro++) {
8113
			if (set->connectors[ro] == &connector->base) {
8114
				connector->new_encoder = connector->encoder;
8115
				break;
8116
			}
8117
		}
8118
 
8119
		/* If we disable the crtc, disable all its connectors. Also, if
8120
		 * the connector is on the changing crtc but not on the new
8121
		 * connector list, disable it. */
8122
		if ((!set->fb || ro == set->num_connectors) &&
8123
		    connector->base.encoder &&
8124
		    connector->base.encoder->crtc == set->crtc) {
8125
			connector->new_encoder = NULL;
8126
 
8127
			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
8128
				connector->base.base.id,
8129
				drm_get_connector_name(&connector->base));
8130
		}
8131
 
8132
 
8133
		if (&connector->new_encoder->base != connector->base.encoder) {
8134
			DRM_DEBUG_KMS("encoder changed, full mode switch\n");
8135
			config->mode_changed = true;
8136
		}
8137
	}
8138
	/* connector->new_encoder is now updated for all connectors. */
8139
 
8140
	/* Update crtc of enabled connectors. */
8141
	count = 0;
8142
	list_for_each_entry(connector, &dev->mode_config.connector_list,
8143
			    base.head) {
8144
		if (!connector->new_encoder)
8145
			continue;
8146
 
8147
		new_crtc = connector->new_encoder->base.crtc;
8148
 
8149
		for (ro = 0; ro < set->num_connectors; ro++) {
8150
			if (set->connectors[ro] == &connector->base)
8151
				new_crtc = set->crtc;
8152
		}
8153
 
8154
		/* Make sure the new CRTC will work with the encoder */
8155
		if (!intel_encoder_crtc_ok(&connector->new_encoder->base,
8156
					   new_crtc)) {
8157
			return -EINVAL;
8158
		}
8159
		connector->encoder->new_crtc = to_intel_crtc(new_crtc);
8160
 
8161
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
8162
			connector->base.base.id,
8163
			drm_get_connector_name(&connector->base),
8164
			new_crtc->base.id);
8165
	}
8166
 
8167
	/* Check for any encoders that needs to be disabled. */
8168
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8169
			    base.head) {
8170
		list_for_each_entry(connector,
8171
				    &dev->mode_config.connector_list,
8172
				    base.head) {
8173
			if (connector->new_encoder == encoder) {
8174
				WARN_ON(!connector->new_encoder->new_crtc);
8175
 
8176
				goto next_encoder;
8177
			}
8178
		}
8179
		encoder->new_crtc = NULL;
8180
next_encoder:
8181
		/* Only now check for crtc changes so we don't miss encoders
8182
		 * that will be disabled. */
8183
		if (&encoder->new_crtc->base != encoder->base.crtc) {
8184
			DRM_DEBUG_KMS("crtc changed, full mode switch\n");
8185
			config->mode_changed = true;
8186
		}
8187
	}
8188
	/* Now we've also updated encoder->new_crtc for all encoders. */
8189
 
8190
	return 0;
8191
}
8192
 
8193
static int intel_crtc_set_config(struct drm_mode_set *set)
8194
{
8195
	struct drm_device *dev;
8196
	struct drm_mode_set save_set;
8197
	struct intel_set_config *config;
8198
	int ret;
8199
 
8200
	BUG_ON(!set);
8201
	BUG_ON(!set->crtc);
8202
	BUG_ON(!set->crtc->helper_private);
8203
 
8204
	if (!set->mode)
8205
		set->fb = NULL;
8206
 
8207
	/* The fb helper likes to play gross jokes with ->mode_set_config.
8208
	 * Unfortunately the crtc helper doesn't do much at all for this case,
8209
	 * so we have to cope with this madness until the fb helper is fixed up. */
8210
	if (set->fb && set->num_connectors == 0)
8211
		return 0;
8212
 
8213
	if (set->fb) {
8214
		DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
8215
				set->crtc->base.id, set->fb->base.id,
8216
				(int)set->num_connectors, set->x, set->y);
8217
	} else {
8218
		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
8219
	}
8220
 
8221
	dev = set->crtc->dev;
8222
 
8223
	ret = -ENOMEM;
8224
	config = kzalloc(sizeof(*config), GFP_KERNEL);
8225
	if (!config)
8226
		goto out_config;
8227
 
8228
	ret = intel_set_config_save_state(dev, config);
8229
	if (ret)
8230
		goto out_config;
8231
 
8232
	save_set.crtc = set->crtc;
8233
	save_set.mode = &set->crtc->mode;
8234
	save_set.x = set->crtc->x;
8235
	save_set.y = set->crtc->y;
8236
	save_set.fb = set->crtc->fb;
8237
 
8238
	/* Compute whether we need a full modeset, only an fb base update or no
8239
	 * change at all. In the future we might also check whether only the
8240
	 * mode changed, e.g. for LVDS where we only change the panel fitter in
8241
	 * such cases. */
8242
	intel_set_config_compute_mode_changes(set, config);
8243
 
8244
	ret = intel_modeset_stage_output_state(dev, set, config);
8245
	if (ret)
8246
		goto fail;
8247
 
8248
	if (config->mode_changed) {
8249
		if (set->mode) {
8250
			DRM_DEBUG_KMS("attempting to set mode from"
8251
					" userspace\n");
8252
			drm_mode_debug_printmodeline(set->mode);
8253
		}
8254
 
8255
		if (!intel_set_mode(set->crtc, set->mode,
8256
				    set->x, set->y, set->fb)) {
8257
			DRM_ERROR("failed to set mode on [CRTC:%d]\n",
8258
				  set->crtc->base.id);
8259
			ret = -EINVAL;
8260
			goto fail;
8261
		}
8262
	} else if (config->fb_changed) {
8263
		ret = intel_pipe_set_base(set->crtc,
8264
					  set->x, set->y, set->fb);
8265
	}
8266
 
8267
	intel_set_config_free(config);
8268
 
8269
	return 0;
8270
 
8271
fail:
8272
	intel_set_config_restore_state(dev, config);
8273
 
8274
	/* Try to restore the config */
8275
	if (config->mode_changed &&
8276
	    !intel_set_mode(save_set.crtc, save_set.mode,
8277
			    save_set.x, save_set.y, save_set.fb))
8278
		DRM_ERROR("failed to restore config after modeset failure\n");
8279
 
8280
out_config:
8281
	intel_set_config_free(config);
8282
	return ret;
8283
}
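/* Editor's note: the sketch below is illustrative only and not part of this
 * driver. It restates the three-way decision made in intel_crtc_set_config()
 * above (full modeset vs. framebuffer base update vs. no change) as a
 * standalone helper; the enum and function names are hypothetical. */
enum set_config_action {
	SET_CONFIG_NOP,		/* neither flag set: nothing to do */
	SET_CONFIG_FB_BASE,	/* only the fb/base changed: flip the base */
	SET_CONFIG_FULL,	/* mode, crtc or routing changed: full modeset */
};

static enum set_config_action
classify_set_config(bool mode_changed, bool fb_changed)
{
	/* A full mode switch dominates a plain framebuffer update. */
	if (mode_changed)
		return SET_CONFIG_FULL;
	if (fb_changed)
		return SET_CONFIG_FB_BASE;
	return SET_CONFIG_NOP;
}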
8284
 
2330 Serge 8285
static const struct drm_crtc_funcs intel_crtc_funcs = {
8286
//	.cursor_set = intel_crtc_cursor_set,
8287
//	.cursor_move = intel_crtc_cursor_move,
8288
	.gamma_set = intel_crtc_gamma_set,
3031 serge 8289
	.set_config = intel_crtc_set_config,
2330 Serge 8290
	.destroy = intel_crtc_destroy,
8291
//	.page_flip = intel_crtc_page_flip,
8292
};
2327 Serge 8293
 
3243 Serge 8294
static void intel_cpu_pll_init(struct drm_device *dev)
8295
{
8296
	if (IS_HASWELL(dev))
8297
		intel_ddi_pll_init(dev);
8298
}
8299
 
3031 serge 8300
static void intel_pch_pll_init(struct drm_device *dev)
8301
{
8302
	drm_i915_private_t *dev_priv = dev->dev_private;
8303
	int i;
8304
 
8305
	if (dev_priv->num_pch_pll == 0) {
8306
		DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n");
8307
		return;
8308
	}
8309
 
8310
	for (i = 0; i < dev_priv->num_pch_pll; i++) {
8311
		dev_priv->pch_plls[i].pll_reg = _PCH_DPLL(i);
8312
		dev_priv->pch_plls[i].fp0_reg = _PCH_FP0(i);
8313
		dev_priv->pch_plls[i].fp1_reg = _PCH_FP1(i);
8314
	}
8315
}
8316
 
2330 Serge 8317
static void intel_crtc_init(struct drm_device *dev, int pipe)
8318
{
8319
	drm_i915_private_t *dev_priv = dev->dev_private;
8320
	struct intel_crtc *intel_crtc;
8321
	int i;
2327 Serge 8322
 
2330 Serge 8323
	intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
8324
	if (intel_crtc == NULL)
8325
		return;
2327 Serge 8326
 
2330 Serge 8327
	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
2327 Serge 8328
 
2330 Serge 8329
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
8330
	for (i = 0; i < 256; i++) {
8331
		intel_crtc->lut_r[i] = i;
8332
		intel_crtc->lut_g[i] = i;
8333
		intel_crtc->lut_b[i] = i;
8334
	}
2327 Serge 8335
 
2330 Serge 8336
	/* Swap pipes & planes for FBC on pre-965 */
8337
	intel_crtc->pipe = pipe;
8338
	intel_crtc->plane = pipe;
3243 Serge 8339
	intel_crtc->cpu_transcoder = pipe;
2330 Serge 8340
	if (IS_MOBILE(dev) && IS_GEN3(dev)) {
8341
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
8342
		intel_crtc->plane = !pipe;
8343
	}
2327 Serge 8344
 
2330 Serge 8345
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
8346
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
8347
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
8348
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
2327 Serge 8349
 
2330 Serge 8350
	intel_crtc->bpp = 24; /* default for pre-Ironlake */
2327 Serge 8351
 
2330 Serge 8352
	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
8353
}
2327 Serge 8354
 
3031 serge 8355
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
8356
				struct drm_file *file)
8357
{
8358
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
8359
	struct drm_mode_object *drmmode_obj;
8360
	struct intel_crtc *crtc;
2327 Serge 8361
 
3031 serge 8362
	drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
8363
			DRM_MODE_OBJECT_CRTC);
2327 Serge 8364
 
3031 serge 8365
	if (!drmmode_obj) {
8366
		DRM_ERROR("no such CRTC id\n");
8367
		return -EINVAL;
8368
	}
2327 Serge 8369
 
3031 serge 8370
	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
8371
	pipe_from_crtc_id->pipe = crtc->pipe;
2327 Serge 8372
 
3031 serge 8373
	return 0;
8374
}
2327 Serge 8375
 
3031 serge 8376
static int intel_encoder_clones(struct intel_encoder *encoder)
2330 Serge 8377
{
3031 serge 8378
	struct drm_device *dev = encoder->base.dev;
8379
	struct intel_encoder *source_encoder;
2330 Serge 8380
	int index_mask = 0;
8381
	int entry = 0;
2327 Serge 8382
 
3031 serge 8383
	list_for_each_entry(source_encoder,
8384
			    &dev->mode_config.encoder_list, base.head) {
8385
 
8386
		if (encoder == source_encoder)
2330 Serge 8387
			index_mask |= (1 << entry);
3031 serge 8388
 
8389
		/* Intel hw has only one MUX where encoders could be cloned. */
8390
		if (encoder->cloneable && source_encoder->cloneable)
8391
			index_mask |= (1 << entry);
8392
 
2330 Serge 8393
		entry++;
8394
	}
2327 Serge 8395
 
2330 Serge 8396
	return index_mask;
8397
}
2327 Serge 8398
 
2330 Serge 8399
static bool has_edp_a(struct drm_device *dev)
8400
{
8401
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 8402
 
2330 Serge 8403
	if (!IS_MOBILE(dev))
8404
		return false;
2327 Serge 8405
 
2330 Serge 8406
	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
8407
		return false;
2327 Serge 8408
 
2330 Serge 8409
	if (IS_GEN5(dev) &&
8410
	    (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
8411
		return false;
2327 Serge 8412
 
2330 Serge 8413
	return true;
8414
}
2327 Serge 8415
 
2330 Serge 8416
static void intel_setup_outputs(struct drm_device *dev)
8417
{
8418
	struct drm_i915_private *dev_priv = dev->dev_private;
8419
	struct intel_encoder *encoder;
8420
	bool dpd_is_edp = false;
3031 serge 8421
	bool has_lvds;
2327 Serge 8422
 
2330 Serge 8423
	has_lvds = intel_lvds_init(dev);
8424
	if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
8425
		/* disable the panel fitter on everything but LVDS */
8426
		I915_WRITE(PFIT_CONTROL, 0);
8427
	}
2327 Serge 8428
 
3243 Serge 8429
	if (!(IS_HASWELL(dev) &&
8430
	      (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
2330 Serge 8431
		intel_crt_init(dev);
2327 Serge 8432
 
3031 serge 8433
	if (IS_HASWELL(dev)) {
2330 Serge 8434
		int found;
2327 Serge 8435
 
3031 serge 8436
		/* Haswell uses DDI functions to detect digital outputs */
8437
		found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
8438
		/* DDI A only supports eDP */
8439
		if (found)
8440
			intel_ddi_init(dev, PORT_A);
8441
 
8442
		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
8443
		 * register */
8444
		found = I915_READ(SFUSE_STRAP);
8445
 
8446
		if (found & SFUSE_STRAP_DDIB_DETECTED)
8447
			intel_ddi_init(dev, PORT_B);
8448
		if (found & SFUSE_STRAP_DDIC_DETECTED)
8449
			intel_ddi_init(dev, PORT_C);
8450
		if (found & SFUSE_STRAP_DDID_DETECTED)
8451
			intel_ddi_init(dev, PORT_D);
8452
	} else if (HAS_PCH_SPLIT(dev)) {
8453
		int found;
3243 Serge 8454
		dpd_is_edp = intel_dpd_is_edp(dev);
3031 serge 8455
 
3243 Serge 8456
		if (has_edp_a(dev))
8457
			intel_dp_init(dev, DP_A, PORT_A);
8458
 
2330 Serge 8459
		if (I915_READ(HDMIB) & PORT_DETECTED) {
8460
			/* PCH SDVOB multiplex with HDMIB */
3031 serge 8461
			found = intel_sdvo_init(dev, PCH_SDVOB, true);
2330 Serge 8462
			if (!found)
3031 serge 8463
				intel_hdmi_init(dev, HDMIB, PORT_B);
2330 Serge 8464
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
3031 serge 8465
				intel_dp_init(dev, PCH_DP_B, PORT_B);
2330 Serge 8466
		}
2327 Serge 8467
 
2330 Serge 8468
		if (I915_READ(HDMIC) & PORT_DETECTED)
3031 serge 8469
			intel_hdmi_init(dev, HDMIC, PORT_C);
2327 Serge 8470
 
3031 serge 8471
		if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED)
8472
			intel_hdmi_init(dev, HDMID, PORT_D);
2327 Serge 8473
 
2330 Serge 8474
		if (I915_READ(PCH_DP_C) & DP_DETECTED)
3031 serge 8475
			intel_dp_init(dev, PCH_DP_C, PORT_C);
2327 Serge 8476
 
3243 Serge 8477
		if (I915_READ(PCH_DP_D) & DP_DETECTED)
3031 serge 8478
			intel_dp_init(dev, PCH_DP_D, PORT_D);
8479
	} else if (IS_VALLEYVIEW(dev)) {
8480
		int found;
2327 Serge 8481
 
3243 Serge 8482
		/* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
8483
		if (I915_READ(DP_C) & DP_DETECTED)
8484
			intel_dp_init(dev, DP_C, PORT_C);
8485
 
3031 serge 8486
		if (I915_READ(SDVOB) & PORT_DETECTED) {
8487
			/* SDVOB multiplex with HDMIB */
8488
			found = intel_sdvo_init(dev, SDVOB, true);
8489
			if (!found)
8490
				intel_hdmi_init(dev, SDVOB, PORT_B);
8491
			if (!found && (I915_READ(DP_B) & DP_DETECTED))
8492
				intel_dp_init(dev, DP_B, PORT_B);
8493
		}
8494
 
8495
		if (I915_READ(SDVOC) & PORT_DETECTED)
8496
			intel_hdmi_init(dev, SDVOC, PORT_C);
8497
 
2330 Serge 8498
	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
8499
		bool found = false;
2327 Serge 8500
 
2330 Serge 8501
		if (I915_READ(SDVOB) & SDVO_DETECTED) {
8502
			DRM_DEBUG_KMS("probing SDVOB\n");
3031 serge 8503
			found = intel_sdvo_init(dev, SDVOB, true);
2330 Serge 8504
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
8505
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
3031 serge 8506
				intel_hdmi_init(dev, SDVOB, PORT_B);
2330 Serge 8507
			}
2327 Serge 8508
 
2330 Serge 8509
			if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
8510
				DRM_DEBUG_KMS("probing DP_B\n");
3031 serge 8511
				intel_dp_init(dev, DP_B, PORT_B);
2330 Serge 8512
			}
8513
		}
2327 Serge 8514
 
2330 Serge 8515
		/* Before G4X, SDVOC doesn't have its own detect register */
2327 Serge 8516
 
2330 Serge 8517
		if (I915_READ(SDVOB) & SDVO_DETECTED) {
8518
			DRM_DEBUG_KMS("probing SDVOC\n");
3031 serge 8519
			found = intel_sdvo_init(dev, SDVOC, false);
2330 Serge 8520
		}
2327 Serge 8521
 
2330 Serge 8522
		if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
2327 Serge 8523
 
2330 Serge 8524
			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
8525
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
3031 serge 8526
				intel_hdmi_init(dev, SDVOC, PORT_C);
2330 Serge 8527
			}
8528
			if (SUPPORTS_INTEGRATED_DP(dev)) {
8529
				DRM_DEBUG_KMS("probing DP_C\n");
3031 serge 8530
				intel_dp_init(dev, DP_C, PORT_C);
2330 Serge 8531
			}
8532
		}
2327 Serge 8533
 
2330 Serge 8534
		if (SUPPORTS_INTEGRATED_DP(dev) &&
8535
		    (I915_READ(DP_D) & DP_DETECTED)) {
8536
			DRM_DEBUG_KMS("probing DP_D\n");
3031 serge 8537
			intel_dp_init(dev, DP_D, PORT_D);
2330 Serge 8538
		}
8539
	} else if (IS_GEN2(dev))
8540
		intel_dvo_init(dev);
2327 Serge 8541
 
2330 Serge 8542
//   if (SUPPORTS_TV(dev))
8543
//       intel_tv_init(dev);
2327 Serge 8544
 
2330 Serge 8545
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
8546
		encoder->base.possible_crtcs = encoder->crtc_mask;
8547
		encoder->base.possible_clones =
3031 serge 8548
			intel_encoder_clones(encoder);
2330 Serge 8549
	}
2327 Serge 8550
 
3243 Serge 8551
	intel_init_pch_refclk(dev);
8552
 
8553
	drm_helper_move_panel_connectors_to_head(dev);
2330 Serge 8554
}
8555
 
8556
 
8557
 
2335 Serge 8558
static const struct drm_framebuffer_funcs intel_fb_funcs = {
8559
//	.destroy = intel_user_framebuffer_destroy,
8560
//	.create_handle = intel_user_framebuffer_create_handle,
8561
};
2327 Serge 8562
 
2335 Serge 8563
int intel_framebuffer_init(struct drm_device *dev,
8564
			   struct intel_framebuffer *intel_fb,
2342 Serge 8565
			   struct drm_mode_fb_cmd2 *mode_cmd,
2335 Serge 8566
			   struct drm_i915_gem_object *obj)
8567
{
8568
	int ret;
2327 Serge 8569
 
3243 Serge 8570
	if (obj->tiling_mode == I915_TILING_Y) {
8571
		DRM_DEBUG("hardware does not support tiling Y\n");
2335 Serge 8572
		return -EINVAL;
3243 Serge 8573
	}
2327 Serge 8574
 
3243 Serge 8575
	if (mode_cmd->pitches[0] & 63) {
8576
		DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
8577
			  mode_cmd->pitches[0]);
8578
		return -EINVAL;
8579
	}
8580
 
8581
	/* FIXME <= Gen4 stride limits are a bit unclear */
8582
	if (mode_cmd->pitches[0] > 32768) {
8583
		DRM_DEBUG("pitch (%d) must be at less than 32768\n",
8584
			  mode_cmd->pitches[0]);
8585
		return -EINVAL;
8586
	}
8587
 
8588
	if (obj->tiling_mode != I915_TILING_NONE &&
8589
	    mode_cmd->pitches[0] != obj->stride) {
8590
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
8591
			  mode_cmd->pitches[0], obj->stride);
2335 Serge 8592
		return -EINVAL;
3243 Serge 8593
	}
2327 Serge 8594
 
3243 Serge 8595
	/* Reject formats not supported by any plane early. */
2342 Serge 8596
	switch (mode_cmd->pixel_format) {
3243 Serge 8597
	case DRM_FORMAT_C8:
2342 Serge 8598
	case DRM_FORMAT_RGB565:
8599
	case DRM_FORMAT_XRGB8888:
3243 Serge 8600
	case DRM_FORMAT_ARGB8888:
8601
		break;
8602
	case DRM_FORMAT_XRGB1555:
8603
	case DRM_FORMAT_ARGB1555:
8604
		if (INTEL_INFO(dev)->gen > 3) {
8605
			DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
8606
			return -EINVAL;
8607
		}
8608
		break;
3031 serge 8609
	case DRM_FORMAT_XBGR8888:
3243 Serge 8610
	case DRM_FORMAT_ABGR8888:
2342 Serge 8611
	case DRM_FORMAT_XRGB2101010:
8612
	case DRM_FORMAT_ARGB2101010:
3243 Serge 8613
	case DRM_FORMAT_XBGR2101010:
8614
	case DRM_FORMAT_ABGR2101010:
8615
		if (INTEL_INFO(dev)->gen < 4) {
8616
			DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
8617
			return -EINVAL;
8618
		}
2335 Serge 8619
		break;
2342 Serge 8620
	case DRM_FORMAT_YUYV:
8621
	case DRM_FORMAT_UYVY:
8622
	case DRM_FORMAT_YVYU:
8623
	case DRM_FORMAT_VYUY:
3243 Serge 8624
		if (INTEL_INFO(dev)->gen < 5) {
8625
			DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
8626
			return -EINVAL;
8627
		}
2342 Serge 8628
		break;
2335 Serge 8629
	default:
3243 Serge 8630
		DRM_DEBUG("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
2335 Serge 8631
		return -EINVAL;
8632
	}
2327 Serge 8633
 
3243 Serge 8634
	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
8635
	if (mode_cmd->offsets[0] != 0)
8636
		return -EINVAL;
8637
 
2335 Serge 8638
	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
8639
	if (ret) {
8640
		DRM_ERROR("framebuffer init failed %d\n", ret);
8641
		return ret;
8642
	}
2327 Serge 8643
 
2335 Serge 8644
	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
8645
	intel_fb->obj = obj;
8646
	return 0;
8647
}
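/* Editor's note: illustrative sketch only, not part of the driver. It gathers
 * the pitch rules enforced by intel_framebuffer_init() above into a single
 * standalone predicate: 64-byte alignment, a conservative 32768-byte upper
 * bound, and, for tiled objects, an exact match with the object's fence
 * stride. The function name is hypothetical. */
static bool fb_pitch_ok(u32 pitch, u32 obj_stride, bool tiled)
{
	if (pitch & 63)		/* must be a multiple of 64 bytes */
		return false;
	if (pitch > 32768)	/* same upper bound as the check above */
		return false;
	if (tiled && pitch != obj_stride)
		return false;
	return true;
}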
2327 Serge 8648
 
8649
 
2360 Serge 8650
static const struct drm_mode_config_funcs intel_mode_funcs = {
8651
	.fb_create = NULL /*intel_user_framebuffer_create*/,
8652
	.output_poll_changed = NULL /*intel_fb_output_poll_changed*/,
8653
};
2327 Serge 8654
 
3031 serge 8655
/* Set up chip specific display functions */
8656
static void intel_init_display(struct drm_device *dev)
8657
{
8658
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 8659
 
3031 serge 8660
	/* We always want a DPMS function */
3243 Serge 8661
	if (IS_HASWELL(dev)) {
8662
		dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
8663
		dev_priv->display.crtc_enable = haswell_crtc_enable;
8664
		dev_priv->display.crtc_disable = haswell_crtc_disable;
8665
		dev_priv->display.off = haswell_crtc_off;
8666
		dev_priv->display.update_plane = ironlake_update_plane;
8667
	} else if (HAS_PCH_SPLIT(dev)) {
3031 serge 8668
		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
8669
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
8670
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
8671
		dev_priv->display.off = ironlake_crtc_off;
8672
		dev_priv->display.update_plane = ironlake_update_plane;
8673
	} else {
8674
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
8675
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
8676
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
8677
		dev_priv->display.off = i9xx_crtc_off;
8678
		dev_priv->display.update_plane = i9xx_update_plane;
8679
	}
2327 Serge 8680
 
3031 serge 8681
	/* Returns the core display clock speed */
8682
	if (IS_VALLEYVIEW(dev))
8683
		dev_priv->display.get_display_clock_speed =
8684
			valleyview_get_display_clock_speed;
8685
	else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
8686
		dev_priv->display.get_display_clock_speed =
8687
			i945_get_display_clock_speed;
8688
	else if (IS_I915G(dev))
8689
		dev_priv->display.get_display_clock_speed =
8690
			i915_get_display_clock_speed;
8691
	else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
8692
		dev_priv->display.get_display_clock_speed =
8693
			i9xx_misc_get_display_clock_speed;
8694
	else if (IS_I915GM(dev))
8695
		dev_priv->display.get_display_clock_speed =
8696
			i915gm_get_display_clock_speed;
8697
	else if (IS_I865G(dev))
8698
		dev_priv->display.get_display_clock_speed =
8699
			i865_get_display_clock_speed;
8700
	else if (IS_I85X(dev))
8701
		dev_priv->display.get_display_clock_speed =
8702
			i855_get_display_clock_speed;
8703
	else /* 852, 830 */
8704
		dev_priv->display.get_display_clock_speed =
8705
			i830_get_display_clock_speed;
2327 Serge 8706
 
3031 serge 8707
	if (HAS_PCH_SPLIT(dev)) {
8708
		if (IS_GEN5(dev)) {
8709
			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
8710
			dev_priv->display.write_eld = ironlake_write_eld;
8711
		} else if (IS_GEN6(dev)) {
8712
			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
8713
			dev_priv->display.write_eld = ironlake_write_eld;
8714
		} else if (IS_IVYBRIDGE(dev)) {
8715
			/* FIXME: detect B0+ stepping and use auto training */
8716
			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
8717
			dev_priv->display.write_eld = ironlake_write_eld;
3243 Serge 8718
			dev_priv->display.modeset_global_resources =
8719
				ivb_modeset_global_resources;
3031 serge 8720
		} else if (IS_HASWELL(dev)) {
8721
			dev_priv->display.fdi_link_train = hsw_fdi_link_train;
8722
			dev_priv->display.write_eld = haswell_write_eld;
8723
		} else
8724
			dev_priv->display.update_wm = NULL;
8725
	} else if (IS_G4X(dev)) {
8726
		dev_priv->display.write_eld = g4x_write_eld;
8727
	}
2327 Serge 8728
 
3031 serge 8729
	/* Default just returns -ENODEV to indicate unsupported */
8730
//	dev_priv->display.queue_flip = intel_default_queue_flip;
2327 Serge 8731
 
8732
 
8733
 
8734
 
3031 serge 8735
}
8736
 
8737
/*
8738
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
8739
 * resume, or other times.  This quirk makes sure that's the case for
8740
 * affected systems.
8741
 */
8742
static void quirk_pipea_force(struct drm_device *dev)
2330 Serge 8743
{
8744
	struct drm_i915_private *dev_priv = dev->dev_private;
2327 Serge 8745
 
3031 serge 8746
	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
8747
	DRM_INFO("applying pipe a force quirk\n");
8748
}
2327 Serge 8749
 
3031 serge 8750
/*
8751
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
8752
 */
8753
static void quirk_ssc_force_disable(struct drm_device *dev)
8754
{
8755
	struct drm_i915_private *dev_priv = dev->dev_private;
8756
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
8757
	DRM_INFO("applying lvds SSC disable quirk\n");
2330 Serge 8758
}
2327 Serge 8759
 
3031 serge 8760
/*
8761
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
8762
 * brightness value
8763
 */
8764
static void quirk_invert_brightness(struct drm_device *dev)
2330 Serge 8765
{
8766
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 8767
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
8768
	DRM_INFO("applying inverted panel brightness quirk\n");
8769
}
2327 Serge 8770
 
3031 serge 8771
struct intel_quirk {
8772
	int device;
8773
	int subsystem_vendor;
8774
	int subsystem_device;
8775
	void (*hook)(struct drm_device *dev);
8776
};
2327 Serge 8777
 
3031 serge 8778
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
8779
struct intel_dmi_quirk {
8780
	void (*hook)(struct drm_device *dev);
8781
	const struct dmi_system_id (*dmi_id_list)[];
8782
};
2327 Serge 8783
 
3031 serge 8784
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
8785
{
8786
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
8787
	return 1;
2330 Serge 8788
}
2327 Serge 8789
 
3031 serge 8790
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
8791
	{
8792
		.dmi_id_list = &(const struct dmi_system_id[]) {
8793
			{
8794
				.callback = intel_dmi_reverse_brightness,
8795
				.ident = "NCR Corporation",
8796
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
8797
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
8798
				},
8799
			},
8800
			{ }  /* terminating entry */
8801
		},
8802
		.hook = quirk_invert_brightness,
8803
	},
8804
};
2327 Serge 8805
 
3031 serge 8806
static struct intel_quirk intel_quirks[] = {
8807
	/* HP Mini needs pipe A force quirk (LP: #322104) */
8808
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
2327 Serge 8809
 
3031 serge 8810
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
8811
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
2327 Serge 8812
 
3031 serge 8813
	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
8814
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
2327 Serge 8815
 
3031 serge 8816
	/* 830/845 need to leave pipe A & dpll A up */
8817
	{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
8818
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
2327 Serge 8819
 
3031 serge 8820
	/* Lenovo U160 cannot use SSC on LVDS */
8821
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
2327 Serge 8822
 
3031 serge 8823
	/* Sony Vaio Y cannot use SSC on LVDS */
8824
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
2327 Serge 8825
 
3031 serge 8826
	/* Acer Aspire 5734Z must invert backlight brightness */
8827
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
8828
};
2327 Serge 8829
 
3031 serge 8830
static void intel_init_quirks(struct drm_device *dev)
2330 Serge 8831
{
3031 serge 8832
	struct pci_dev *d = dev->pdev;
8833
	int i;
2327 Serge 8834
 
3031 serge 8835
	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
8836
		struct intel_quirk *q = &intel_quirks[i];
2327 Serge 8837
 
3031 serge 8838
		if (d->device == q->device &&
8839
		    (d->subsystem_vendor == q->subsystem_vendor ||
8840
		     q->subsystem_vendor == PCI_ANY_ID) &&
8841
		    (d->subsystem_device == q->subsystem_device ||
8842
		     q->subsystem_device == PCI_ANY_ID))
8843
			q->hook(dev);
8844
	}
8845
//	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
8846
//		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
8847
//			intel_dmi_quirks[i].hook(dev);
8848
//	}
2330 Serge 8849
}
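/* Editor's note: illustrative sketch only, not part of the driver. It isolates
 * the matching rule applied by intel_init_quirks() above: a quirk entry fires
 * when the PCI device ID matches and each subsystem field either matches or is
 * the PCI_ANY_ID wildcard. The function name is hypothetical. */
static bool intel_quirk_matches(const struct intel_quirk *q,
				int device, int subsys_vendor, int subsys_device)
{
	return device == q->device &&
	       (subsys_vendor == q->subsystem_vendor ||
		q->subsystem_vendor == PCI_ANY_ID) &&
	       (subsys_device == q->subsystem_device ||
		q->subsystem_device == PCI_ANY_ID);
}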
2327 Serge 8850
 
3031 serge 8851
/* Disable the VGA plane that we never use */
8852
static void i915_disable_vga(struct drm_device *dev)
2330 Serge 8853
{
8854
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 8855
	u8 sr1;
8856
	u32 vga_reg;
2327 Serge 8857
 
3031 serge 8858
	if (HAS_PCH_SPLIT(dev))
8859
		vga_reg = CPU_VGACNTRL;
8860
	else
8861
		vga_reg = VGACNTRL;
2327 Serge 8862
 
3031 serge 8863
//   vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
8864
    out8(SR01, VGA_SR_INDEX);
8865
    sr1 = in8(VGA_SR_DATA);
8866
    out8(sr1 | 1<<5, VGA_SR_DATA);
8867
//   vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
8868
	udelay(300);
2327 Serge 8869
 
3031 serge 8870
	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
8871
	POSTING_READ(vga_reg);
2330 Serge 8872
}
8873
 
3031 serge 8874
void intel_modeset_init_hw(struct drm_device *dev)
2342 Serge 8875
{
3031 serge 8876
	/* We attempt to init the necessary power wells early in the initialization
8877
	 * time, so the subsystems that expect power to be enabled can work.
2342 Serge 8878
	 */
3031 serge 8879
	intel_init_power_wells(dev);
2342 Serge 8880
 
3031 serge 8881
	intel_prepare_ddi(dev);
2342 Serge 8882
 
3031 serge 8883
	intel_init_clock_gating(dev);
8884
 
8885
//   mutex_lock(&dev->struct_mutex);
8886
//   intel_enable_gt_powersave(dev);
8887
//   mutex_unlock(&dev->struct_mutex);
2342 Serge 8888
}
8889
 
3031 serge 8890
void intel_modeset_init(struct drm_device *dev)
2330 Serge 8891
{
3031 serge 8892
	struct drm_i915_private *dev_priv = dev->dev_private;
8893
	int i, ret;
2330 Serge 8894
 
3031 serge 8895
	drm_mode_config_init(dev);
2330 Serge 8896
 
3031 serge 8897
	dev->mode_config.min_width = 0;
8898
	dev->mode_config.min_height = 0;
2330 Serge 8899
 
3031 serge 8900
	dev->mode_config.preferred_depth = 24;
8901
	dev->mode_config.prefer_shadow = 1;
2330 Serge 8902
 
3031 serge 8903
	dev->mode_config.funcs = &intel_mode_funcs;
2330 Serge 8904
 
3031 serge 8905
	intel_init_quirks(dev);
2330 Serge 8906
 
3031 serge 8907
	intel_init_pm(dev);
2330 Serge 8908
 
3031 serge 8909
	intel_init_display(dev);
2330 Serge 8910
 
3031 serge 8911
	if (IS_GEN2(dev)) {
8912
		dev->mode_config.max_width = 2048;
8913
		dev->mode_config.max_height = 2048;
8914
	} else if (IS_GEN3(dev)) {
8915
		dev->mode_config.max_width = 4096;
8916
		dev->mode_config.max_height = 4096;
8917
	} else {
8918
		dev->mode_config.max_width = 8192;
8919
		dev->mode_config.max_height = 8192;
8920
	}
8921
	dev->mode_config.fb_base = dev_priv->mm.gtt_base_addr;
2330 Serge 8922
 
3031 serge 8923
	DRM_DEBUG_KMS("%d display pipe%s available.\n",
8924
		      dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
2330 Serge 8925
 
3031 serge 8926
	for (i = 0; i < dev_priv->num_pipe; i++) {
8927
		intel_crtc_init(dev, i);
8928
		ret = intel_plane_init(dev, i);
8929
		if (ret)
8930
			DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
2330 Serge 8931
	}
8932
 
3243 Serge 8933
	intel_cpu_pll_init(dev);
3031 serge 8934
	intel_pch_pll_init(dev);
2330 Serge 8935
 
3031 serge 8936
	/* Just disable it once at startup */
8937
	i915_disable_vga(dev);
8938
	intel_setup_outputs(dev);
8939
}
2330 Serge 8940
 
3031 serge 8941
static void
8942
intel_connector_break_all_links(struct intel_connector *connector)
8943
{
8944
	connector->base.dpms = DRM_MODE_DPMS_OFF;
8945
	connector->base.encoder = NULL;
8946
	connector->encoder->connectors_active = false;
8947
	connector->encoder->base.crtc = NULL;
2330 Serge 8948
}
8949
 
3031 serge 8950
static void intel_enable_pipe_a(struct drm_device *dev)
2330 Serge 8951
{
3031 serge 8952
	struct intel_connector *connector;
8953
	struct drm_connector *crt = NULL;
8954
	struct intel_load_detect_pipe load_detect_temp;
2330 Serge 8955
 
3031 serge 8956
	/* We can't just switch on the pipe A, we need to set things up with a
8957
	 * proper mode and output configuration. As a gross hack, enable pipe A
8958
	 * by enabling the load detect pipe once. */
8959
	list_for_each_entry(connector,
8960
			    &dev->mode_config.connector_list,
8961
			    base.head) {
8962
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
8963
			crt = &connector->base;
8964
			break;
2330 Serge 8965
		}
8966
	}
8967
 
3031 serge 8968
	if (!crt)
8969
		return;
2330 Serge 8970
 
3031 serge 8971
	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp))
8972
		intel_release_load_detect_pipe(crt, &load_detect_temp);
2327 Serge 8973
 
8974
 
8975
}
8976
 
3031 serge 8977
static bool
8978
intel_check_plane_mapping(struct intel_crtc *crtc)
2327 Serge 8979
{
3031 serge 8980
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
8981
	u32 reg, val;
2327 Serge 8982
 
3031 serge 8983
	if (dev_priv->num_pipe == 1)
8984
		return true;
2327 Serge 8985
 
3031 serge 8986
	reg = DSPCNTR(!crtc->plane);
8987
	val = I915_READ(reg);
2327 Serge 8988
 
3031 serge 8989
	if ((val & DISPLAY_PLANE_ENABLE) &&
8990
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
8991
		return false;
2327 Serge 8992
 
3031 serge 8993
	return true;
2327 Serge 8994
}
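/* Editor's note: illustrative sketch only, not part of the driver. It spells
 * out the per-plane test behind intel_check_plane_mapping() above for the
 * two-pipe, pre-gen4 case: the mapping is considered wrong only when the
 * other plane is enabled *and* its pipe-select bit points at this crtc's
 * pipe. The function name is hypothetical. */
static bool plane_steals_pipe(u32 dspcntr, enum pipe pipe)
{
	if (!(dspcntr & DISPLAY_PLANE_ENABLE))
		return false;
	/* With two pipes the select field collapses to 0 (A) or 1 (B). */
	return !!(dspcntr & DISPPLANE_SEL_PIPE_MASK) == pipe;
}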
8995
 
3031 serge 8996
static void intel_sanitize_crtc(struct intel_crtc *crtc)
2327 Serge 8997
{
3031 serge 8998
	struct drm_device *dev = crtc->base.dev;
2327 Serge 8999
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 9000
	u32 reg;
2327 Serge 9001
 
3031 serge 9002
	/* Clear any frame start delays used for debugging left by the BIOS */
3243 Serge 9003
	reg = PIPECONF(crtc->cpu_transcoder);
3031 serge 9004
	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
2327 Serge 9005
 
3031 serge 9006
	/* We need to sanitize the plane -> pipe mapping first because this will
9007
	 * disable the crtc (and hence change the state) if it is wrong. Note
9008
	 * that gen4+ has a fixed plane -> pipe mapping.  */
9009
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
9010
		struct intel_connector *connector;
9011
		bool plane;
2327 Serge 9012
 
3031 serge 9013
		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
9014
			      crtc->base.base.id);
2327 Serge 9015
 
3031 serge 9016
		/* Pipe has the wrong plane attached and the plane is active.
9017
		 * Temporarily change the plane mapping and disable everything
9018
		 * ...  */
9019
		plane = crtc->plane;
9020
		crtc->plane = !plane;
9021
		dev_priv->display.crtc_disable(&crtc->base);
9022
		crtc->plane = plane;
2342 Serge 9023
 
3031 serge 9024
		/* ... and break all links. */
9025
		list_for_each_entry(connector, &dev->mode_config.connector_list,
9026
				    base.head) {
9027
			if (connector->encoder->base.crtc != &crtc->base)
9028
				continue;
2327 Serge 9029
 
3031 serge 9030
			intel_connector_break_all_links(connector);
9031
		}
2327 Serge 9032
 
3031 serge 9033
		WARN_ON(crtc->active);
9034
		crtc->base.enabled = false;
9035
	}
2327 Serge 9036
 
3031 serge 9037
	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
9038
	    crtc->pipe == PIPE_A && !crtc->active) {
9039
		/* BIOS forgot to enable pipe A, this mostly happens after
9040
		 * resume. Force-enable the pipe to fix this; in the update_dpms
9041
		 * call below we restore the pipe to the right state, but leave
9042
		 * the required bits on. */
9043
		intel_enable_pipe_a(dev);
9044
	}
2327 Serge 9045
 
3031 serge 9046
	/* Adjust the state of the output pipe according to whether we
9047
	 * have active connectors/encoders. */
9048
	intel_crtc_update_dpms(&crtc->base);
2327 Serge 9049
 
3031 serge 9050
	if (crtc->active != crtc->base.enabled) {
9051
		struct intel_encoder *encoder;
2327 Serge 9052
 
3031 serge 9053
		/* This can happen either due to bugs in the get_hw_state
9054
		 * functions or because the pipe is force-enabled due to the
9055
		 * pipe A quirk. */
9056
		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
9057
			      crtc->base.base.id,
9058
			      crtc->base.enabled ? "enabled" : "disabled",
9059
			      crtc->active ? "enabled" : "disabled");
2327 Serge 9060
 
3031 serge 9061
		crtc->base.enabled = crtc->active;
2327 Serge 9062
 
3031 serge 9063
		/* Because we only establish the connector -> encoder ->
9064
		 * crtc links if something is active, this means the
9065
		 * crtc is now deactivated. Break the links. connector
9066
		 * -> encoder links are only established when things are
9067
		 *  actually up, hence no need to break them. */
9068
		WARN_ON(crtc->active);
2327 Serge 9069
 
3031 serge 9070
		for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
9071
			WARN_ON(encoder->connectors_active);
9072
			encoder->base.crtc = NULL;
9073
		}
9074
	}
2327 Serge 9075
}
9076
 
3031 serge 9077
static void intel_sanitize_encoder(struct intel_encoder *encoder)
2327 Serge 9078
{
3031 serge 9079
	struct intel_connector *connector;
9080
	struct drm_device *dev = encoder->base.dev;
2327 Serge 9081
 
3031 serge 9082
	/* We need to check both for a crtc link (meaning that the
9083
	 * encoder is active and trying to read from a pipe) and the
9084
	 * pipe itself being active. */
9085
	bool has_active_crtc = encoder->base.crtc &&
9086
		to_intel_crtc(encoder->base.crtc)->active;
2327 Serge 9087
 
3031 serge 9088
	if (encoder->connectors_active && !has_active_crtc) {
9089
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
9090
			      encoder->base.base.id,
9091
			      drm_get_encoder_name(&encoder->base));
2327 Serge 9092
 
3031 serge 9093
		/* Connector is active, but has no active pipe. This is
9094
		 * fallout from our resume register restoring. Disable
9095
		 * the encoder manually again. */
9096
		if (encoder->base.crtc) {
9097
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
9098
				      encoder->base.base.id,
9099
				      drm_get_encoder_name(&encoder->base));
9100
			encoder->disable(encoder);
9101
		}
2327 Serge 9102
 
3031 serge 9103
		/* Inconsistent output/port/pipe state happens presumably due to
9104
		 * a bug in one of the get_hw_state functions. Or someplace else
9105
		 * in our code, like the register restore mess on resume. Clamp
9106
		 * things to off as a safer default. */
9107
		list_for_each_entry(connector,
9108
				    &dev->mode_config.connector_list,
9109
				    base.head) {
9110
			if (connector->encoder != encoder)
9111
				continue;
2327 Serge 9112
 
3031 serge 9113
			intel_connector_break_all_links(connector);
9114
		}
9115
	}
9116
	/* Enabled encoders without active connectors will be fixed in
9117
	 * the crtc fixup. */
2327 Serge 9118
}
9119
 
3031 serge 9120
/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
9121
 * and i915 state tracking structures. */
3243 Serge 9122
void intel_modeset_setup_hw_state(struct drm_device *dev,
9123
				  bool force_restore)
2332 Serge 9124
{
9125
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 9126
	enum pipe pipe;
9127
	u32 tmp;
9128
	struct intel_crtc *crtc;
9129
	struct intel_encoder *encoder;
9130
	struct intel_connector *connector;
2327 Serge 9131
 
3243 Serge 9132
	if (IS_HASWELL(dev)) {
9133
		tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
9134
 
9135
		if (tmp & TRANS_DDI_FUNC_ENABLE) {
9136
			switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
9137
			case TRANS_DDI_EDP_INPUT_A_ON:
9138
			case TRANS_DDI_EDP_INPUT_A_ONOFF:
9139
				pipe = PIPE_A;
9140
				break;
9141
			case TRANS_DDI_EDP_INPUT_B_ONOFF:
9142
				pipe = PIPE_B;
9143
				break;
9144
			case TRANS_DDI_EDP_INPUT_C_ONOFF:
9145
				pipe = PIPE_C;
9146
				break;
9147
			}
9148
 
9149
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
9150
			crtc->cpu_transcoder = TRANSCODER_EDP;
9151
 
9152
			DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n",
9153
				      pipe_name(pipe));
9154
		}
9155
	}
9156
 
3031 serge 9157
	for_each_pipe(pipe) {
9158
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
2327 Serge 9159
 
3243 Serge 9160
		tmp = I915_READ(PIPECONF(crtc->cpu_transcoder));
3031 serge 9161
		if (tmp & PIPECONF_ENABLE)
9162
			crtc->active = true;
9163
		else
9164
			crtc->active = false;
2327 Serge 9165
 
3031 serge 9166
		crtc->base.enabled = crtc->active;
2330 Serge 9167
 
3031 serge 9168
		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
9169
			      crtc->base.base.id,
9170
			      crtc->active ? "enabled" : "disabled");
2339 Serge 9171
	}
2332 Serge 9172
 
3243 Serge 9173
	if (IS_HASWELL(dev))
9174
		intel_ddi_setup_hw_pll_state(dev);
9175
 
3031 serge 9176
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9177
			    base.head) {
9178
		pipe = 0;
2332 Serge 9179
 
3031 serge 9180
		if (encoder->get_hw_state(encoder, &pipe)) {
9181
			encoder->base.crtc =
9182
				dev_priv->pipe_to_crtc_mapping[pipe];
9183
		} else {
9184
			encoder->base.crtc = NULL;
9185
		}
2332 Serge 9186
 
3031 serge 9187
		encoder->connectors_active = false;
9188
		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n",
9189
			      encoder->base.base.id,
9190
			      drm_get_encoder_name(&encoder->base),
9191
			      encoder->base.crtc ? "enabled" : "disabled",
9192
			      pipe);
9193
	}
2332 Serge 9194
 
3031 serge 9195
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9196
			    base.head) {
9197
		if (connector->get_hw_state(connector)) {
9198
			connector->base.dpms = DRM_MODE_DPMS_ON;
9199
			connector->encoder->connectors_active = true;
9200
			connector->base.encoder = &connector->encoder->base;
9201
		} else {
9202
			connector->base.dpms = DRM_MODE_DPMS_OFF;
9203
			connector->base.encoder = NULL;
9204
		}
9205
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
9206
			      connector->base.base.id,
9207
			      drm_get_connector_name(&connector->base),
9208
			      connector->base.encoder ? "enabled" : "disabled");
2332 Serge 9209
	}
9210
 
3031 serge 9211
	/* HW state is read out, now we need to sanitize this mess. */
9212
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9213
			    base.head) {
9214
		intel_sanitize_encoder(encoder);
2332 Serge 9215
	}
9216
 
3031 serge 9217
	for_each_pipe(pipe) {
9218
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
9219
		intel_sanitize_crtc(crtc);
2332 Serge 9220
	}
9221
 
3243 Serge 9222
	if (force_restore) {
9223
		for_each_pipe(pipe) {
9224
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
9225
			intel_set_mode(&crtc->base, &crtc->base.mode,
9226
				       crtc->base.x, crtc->base.y, crtc->base.fb);
9227
		}
9228
 
9229
//       i915_redisable_vga(dev);
9230
	} else {
3031 serge 9231
		intel_modeset_update_staged_output_state(dev);
3243 Serge 9232
	}
2332 Serge 9233
 
3031 serge 9234
	intel_modeset_check_state(dev);
3243 Serge 9235
 
9236
	drm_mode_config_reset(dev);
2332 Serge 9237
}
9238
 
3031 serge 9239
void intel_modeset_gem_init(struct drm_device *dev)
2330 Serge 9240
{
3031 serge 9241
	intel_modeset_init_hw(dev);
2330 Serge 9242
 
3031 serge 9243
//   intel_setup_overlay(dev);
2330 Serge 9244
 
3243 Serge 9245
	intel_modeset_setup_hw_state(dev, false);
2330 Serge 9246
}
9247
 
3031 serge 9248
void intel_modeset_cleanup(struct drm_device *dev)
2327 Serge 9249
{
3031 serge 9250
#if 0
9251
	struct drm_i915_private *dev_priv = dev->dev_private;
9252
	struct drm_crtc *crtc;
9253
	struct intel_crtc *intel_crtc;
2327 Serge 9254
 
3031 serge 9255
//   drm_kms_helper_poll_fini(dev);
9256
	mutex_lock(&dev->struct_mutex);
2327 Serge 9257
 
3031 serge 9258
//   intel_unregister_dsm_handler();
2327 Serge 9259
 
9260
 
3031 serge 9261
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9262
		/* Skip inactive CRTCs */
9263
		if (!crtc->fb)
9264
			continue;
2342 Serge 9265
 
3031 serge 9266
		intel_crtc = to_intel_crtc(crtc);
9267
		intel_increase_pllclock(crtc);
9268
	}
2342 Serge 9269
 
3031 serge 9270
	intel_disable_fbc(dev);
2342 Serge 9271
 
3031 serge 9272
	intel_disable_gt_powersave(dev);
2342 Serge 9273
 
3031 serge 9274
	ironlake_teardown_rc6(dev);
2327 Serge 9275
 
3031 serge 9276
	if (IS_VALLEYVIEW(dev))
9277
		vlv_init_dpio(dev);
2327 Serge 9278
 
3031 serge 9279
	mutex_unlock(&dev->struct_mutex);
2327 Serge 9280
 
3031 serge 9281
	/* Disable the irq before mode object teardown, for the irq might
9282
	 * enqueue unpin/hotplug work. */
9283
//   drm_irq_uninstall(dev);
9284
//   cancel_work_sync(&dev_priv->hotplug_work);
9285
//   cancel_work_sync(&dev_priv->rps.work);
2327 Serge 9286
 
3031 serge 9287
	/* flush any delayed tasks or pending work */
9288
//   flush_scheduled_work();
2327 Serge 9289
 
3031 serge 9290
	drm_mode_config_cleanup(dev);
2327 Serge 9291
#endif
9292
}
9293
 
9294
/*
3031 serge 9295
 * Return which encoder is currently attached to this connector.
2327 Serge 9296
 */
3031 serge 9297
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
2327 Serge 9298
{
3031 serge 9299
	return &intel_attached_encoder(connector)->base;
9300
}
2327 Serge 9301
 
3031 serge 9302
void intel_connector_attach_encoder(struct intel_connector *connector,
9303
				    struct intel_encoder *encoder)
9304
{
9305
	connector->encoder = encoder;
9306
	drm_mode_connector_attach_encoder(&connector->base,
9307
					  &encoder->base);
2327 Serge 9308
}
9309
 
9310
/*
3031 serge 9311
 * set vga decode state - true == enable VGA decode
2327 Serge 9312
 */
3031 serge 9313
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
2327 Serge 9314
{
2330 Serge 9315
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 9316
	u16 gmch_ctrl;
2327 Serge 9317
 
3031 serge 9318
	pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
9319
	if (state)
9320
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
2330 Serge 9321
	else
3031 serge 9322
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
9323
	pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
9324
	return 0;
2330 Serge 9325
}
9326
 
3031 serge 9327
#ifdef CONFIG_DEBUG_FS
9328
#include 
2327 Serge 9329
 
3031 serge 9330
struct intel_display_error_state {
9331
	struct intel_cursor_error_state {
9332
		u32 control;
9333
		u32 position;
9334
		u32 base;
9335
		u32 size;
9336
	} cursor[I915_MAX_PIPES];
2327 Serge 9337
 
3031 serge 9338
	struct intel_pipe_error_state {
9339
		u32 conf;
9340
		u32 source;
2327 Serge 9341
 
3031 serge 9342
		u32 htotal;
9343
		u32 hblank;
9344
		u32 hsync;
9345
		u32 vtotal;
9346
		u32 vblank;
9347
		u32 vsync;
9348
	} pipe[I915_MAX_PIPES];
2327 Serge 9349
 
3031 serge 9350
	struct intel_plane_error_state {
9351
		u32 control;
9352
		u32 stride;
9353
		u32 size;
9354
		u32 pos;
9355
		u32 addr;
9356
		u32 surface;
9357
		u32 tile_offset;
9358
	} plane[I915_MAX_PIPES];
9359
};
2327 Serge 9360
 
3031 serge 9361
struct intel_display_error_state *
9362
intel_display_capture_error_state(struct drm_device *dev)
9363
{
9364
	drm_i915_private_t *dev_priv = dev->dev_private;
9365
	struct intel_display_error_state *error;
3243 Serge 9366
	enum transcoder cpu_transcoder;
3031 serge 9367
	int i;
2327 Serge 9368
 
3031 serge 9369
	error = kmalloc(sizeof(*error), GFP_ATOMIC);
9370
	if (error == NULL)
9371
		return NULL;
2327 Serge 9372
 
3031 serge 9373
	for_each_pipe(i) {
3243 Serge 9374
		cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
9375
 
3031 serge 9376
		error->cursor[i].control = I915_READ(CURCNTR(i));
9377
		error->cursor[i].position = I915_READ(CURPOS(i));
9378
		error->cursor[i].base = I915_READ(CURBASE(i));
2327 Serge 9379
 
3031 serge 9380
		error->plane[i].control = I915_READ(DSPCNTR(i));
9381
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
9382
		error->plane[i].size = I915_READ(DSPSIZE(i));
9383
		error->plane[i].pos = I915_READ(DSPPOS(i));
9384
		error->plane[i].addr = I915_READ(DSPADDR(i));
9385
		if (INTEL_INFO(dev)->gen >= 4) {
9386
			error->plane[i].surface = I915_READ(DSPSURF(i));
9387
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
9388
		}
2327 Serge 9389
 
3243 Serge 9390
		error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder));
3031 serge 9391
		error->pipe[i].source = I915_READ(PIPESRC(i));
3243 Serge 9392
		error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
9393
		error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder));
9394
		error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder));
9395
		error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
9396
		error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder));
9397
		error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
3031 serge 9398
	}
2327 Serge 9399
 
3031 serge 9400
	return error;
2330 Serge 9401
}
2327 Serge 9402
 
3031 serge 9403
void
9404
intel_display_print_error_state(struct seq_file *m,
9405
				struct drm_device *dev,
9406
				struct intel_display_error_state *error)
2332 Serge 9407
{
3031 serge 9408
	drm_i915_private_t *dev_priv = dev->dev_private;
9409
	int i;
2330 Serge 9410
 
3031 serge 9411
	seq_printf(m, "Num Pipes: %d\n", dev_priv->num_pipe);
9412
	for_each_pipe(i) {
9413
		seq_printf(m, "Pipe [%d]:\n", i);
9414
		seq_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
9415
		seq_printf(m, "  SRC: %08x\n", error->pipe[i].source);
9416
		seq_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
9417
		seq_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
9418
		seq_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
9419
		seq_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
9420
		seq_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
9421
		seq_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);
2332 Serge 9422
 
3031 serge 9423
		seq_printf(m, "Plane [%d]:\n", i);
9424
		seq_printf(m, "  CNTR: %08x\n", error->plane[i].control);
9425
		seq_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
9426
		seq_printf(m, "  SIZE: %08x\n", error->plane[i].size);
9427
		seq_printf(m, "  POS: %08x\n", error->plane[i].pos);
9428
		seq_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
9429
		if (INTEL_INFO(dev)->gen >= 4) {
9430
			seq_printf(m, "  SURF: %08x\n", error->plane[i].surface);
9431
			seq_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
9432
		}
2332 Serge 9433
 
3031 serge 9434
		seq_printf(m, "Cursor [%d]:\n", i);
9435
		seq_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
9436
		seq_printf(m, "  POS: %08x\n", error->cursor[i].position);
9437
		seq_printf(m, "  BASE: %08x\n", error->cursor[i].base);
9438
	}
2327 Serge 9439
}
3031 serge 9440
#endif