Subversion Repositories Kolibri OS

Rev

Rev 4557 | Rev 5060 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 4557 Rev 4560
1
/*
1
/*
2
 * Copyright © 2006-2007 Intel Corporation
2
 * Copyright © 2006-2007 Intel Corporation
3
 *
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
10
 *
11
 * The above copyright notice and this permission notice (including the next
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
13
 * Software.
14
 *
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21
 * DEALINGS IN THE SOFTWARE.
21
 * DEALINGS IN THE SOFTWARE.
22
 *
22
 *
23
 * Authors:
23
 * Authors:
24
 *  Eric Anholt 
24
 *  Eric Anholt 
25
 */
25
 */
26
 
26
 
27
//#include 
27
//#include 
28
#include 
28
#include 
29
//#include 
29
//#include 
30
#include 
30
#include 
31
#include 
31
#include 
32
#include 
32
#include 
33
#include 
33
#include 
34
#include 
34
#include 
35
#include 
35
#include 
36
#include "intel_drv.h"
36
#include "intel_drv.h"
37
#include 
37
#include 
38
#include "i915_drv.h"
38
#include "i915_drv.h"
39
#include "i915_trace.h"
39
#include "i915_trace.h"
40
#include 
40
#include 
41
#include 
41
#include 
42
//#include 
42
//#include 
43
 
43
 
44
#define MAX_ERRNO       4095
44
#define MAX_ERRNO       4095
45
phys_addr_t get_bus_addr(void);
45
phys_addr_t get_bus_addr(void);
-
 
46
 
-
 
47
static inline void outb(u8 v, u16 port)
46
 
48
{
-
 
49
    asm volatile("outb %0,%1" : : "a" (v), "dN" (port));
-
 
50
}
-
 
51
static inline u8 inb(u16 port)
-
 
52
{
-
 
53
    u8 v;
-
 
54
    asm volatile("inb %1,%0" : "=a" (v) : "dN" (port));
-
 
55
    return v;
-
 
56
}
47
bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
57
 
48
static void intel_increase_pllclock(struct drm_crtc *crtc);
58
static void intel_increase_pllclock(struct drm_crtc *crtc);
49
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
59
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
50
 
60
 
51
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
61
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
52
				struct intel_crtc_config *pipe_config);
62
				struct intel_crtc_config *pipe_config);
53
static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
63
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
54
				    struct intel_crtc_config *pipe_config);
64
				    struct intel_crtc_config *pipe_config);
55
 
65
 
56
static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
66
static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
57
			  int x, int y, struct drm_framebuffer *old_fb);
67
			  int x, int y, struct drm_framebuffer *old_fb);
58
 
68
 
59
 
69
 
60
typedef struct {
70
typedef struct {
61
    int min, max;
71
    int min, max;
62
} intel_range_t;
72
} intel_range_t;
63
 
73
 
64
typedef struct {
74
typedef struct {
65
    int dot_limit;
75
    int dot_limit;
66
    int p2_slow, p2_fast;
76
    int p2_slow, p2_fast;
67
} intel_p2_t;
77
} intel_p2_t;
68
 
78
 
69
typedef struct intel_limit intel_limit_t;
79
typedef struct intel_limit intel_limit_t;
70
struct intel_limit {
80
struct intel_limit {
71
    intel_range_t   dot, vco, n, m, m1, m2, p, p1;
81
    intel_range_t   dot, vco, n, m, m1, m2, p, p1;
72
    intel_p2_t      p2;
82
    intel_p2_t      p2;
73
};
83
};
74
 
-
 
75
/* FDI */
-
 
76
#define IRONLAKE_FDI_FREQ       2700000 /* in kHz for mode->clock */
-
 
77
 
84
 
78
int
85
int
79
intel_pch_rawclk(struct drm_device *dev)
86
intel_pch_rawclk(struct drm_device *dev)
80
{
87
{
81
	struct drm_i915_private *dev_priv = dev->dev_private;
88
	struct drm_i915_private *dev_priv = dev->dev_private;
82
 
89
 
83
	WARN_ON(!HAS_PCH_SPLIT(dev));
90
	WARN_ON(!HAS_PCH_SPLIT(dev));
84
 
91
 
85
	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
92
	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
86
}
93
}
87
 
94
 
88
static inline u32 /* units of 100MHz */
95
static inline u32 /* units of 100MHz */
89
intel_fdi_link_freq(struct drm_device *dev)
96
intel_fdi_link_freq(struct drm_device *dev)
90
{
97
{
91
	if (IS_GEN5(dev)) {
98
	if (IS_GEN5(dev)) {
92
		struct drm_i915_private *dev_priv = dev->dev_private;
99
		struct drm_i915_private *dev_priv = dev->dev_private;
93
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
100
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
94
	} else
101
	} else
95
		return 27;
102
		return 27;
96
}
103
}
97
 
104
 
98
static const intel_limit_t intel_limits_i8xx_dac = {
105
static const intel_limit_t intel_limits_i8xx_dac = {
99
	.dot = { .min = 25000, .max = 350000 },
106
	.dot = { .min = 25000, .max = 350000 },
100
	.vco = { .min = 930000, .max = 1400000 },
107
	.vco = { .min = 908000, .max = 1512000 },
101
	.n = { .min = 3, .max = 16 },
108
	.n = { .min = 2, .max = 16 },
102
	.m = { .min = 96, .max = 140 },
109
	.m = { .min = 96, .max = 140 },
103
	.m1 = { .min = 18, .max = 26 },
110
	.m1 = { .min = 18, .max = 26 },
104
	.m2 = { .min = 6, .max = 16 },
111
	.m2 = { .min = 6, .max = 16 },
105
	.p = { .min = 4, .max = 128 },
112
	.p = { .min = 4, .max = 128 },
106
	.p1 = { .min = 2, .max = 33 },
113
	.p1 = { .min = 2, .max = 33 },
107
	.p2 = { .dot_limit = 165000,
114
	.p2 = { .dot_limit = 165000,
108
		.p2_slow = 4, .p2_fast = 2 },
115
		.p2_slow = 4, .p2_fast = 2 },
109
};
116
};
110
 
117
 
111
static const intel_limit_t intel_limits_i8xx_dvo = {
118
static const intel_limit_t intel_limits_i8xx_dvo = {
112
        .dot = { .min = 25000, .max = 350000 },
119
        .dot = { .min = 25000, .max = 350000 },
113
        .vco = { .min = 930000, .max = 1400000 },
120
	.vco = { .min = 908000, .max = 1512000 },
114
        .n = { .min = 3, .max = 16 },
121
	.n = { .min = 2, .max = 16 },
115
        .m = { .min = 96, .max = 140 },
122
        .m = { .min = 96, .max = 140 },
116
        .m1 = { .min = 18, .max = 26 },
123
        .m1 = { .min = 18, .max = 26 },
117
        .m2 = { .min = 6, .max = 16 },
124
        .m2 = { .min = 6, .max = 16 },
118
        .p = { .min = 4, .max = 128 },
125
        .p = { .min = 4, .max = 128 },
119
        .p1 = { .min = 2, .max = 33 },
126
        .p1 = { .min = 2, .max = 33 },
120
	.p2 = { .dot_limit = 165000,
127
	.p2 = { .dot_limit = 165000,
121
		.p2_slow = 4, .p2_fast = 4 },
128
		.p2_slow = 4, .p2_fast = 4 },
122
};
129
};
123
 
130
 
124
static const intel_limit_t intel_limits_i8xx_lvds = {
131
static const intel_limit_t intel_limits_i8xx_lvds = {
125
        .dot = { .min = 25000, .max = 350000 },
132
        .dot = { .min = 25000, .max = 350000 },
126
        .vco = { .min = 930000, .max = 1400000 },
133
	.vco = { .min = 908000, .max = 1512000 },
127
        .n = { .min = 3, .max = 16 },
134
	.n = { .min = 2, .max = 16 },
128
        .m = { .min = 96, .max = 140 },
135
        .m = { .min = 96, .max = 140 },
129
        .m1 = { .min = 18, .max = 26 },
136
        .m1 = { .min = 18, .max = 26 },
130
        .m2 = { .min = 6, .max = 16 },
137
        .m2 = { .min = 6, .max = 16 },
131
        .p = { .min = 4, .max = 128 },
138
        .p = { .min = 4, .max = 128 },
132
        .p1 = { .min = 1, .max = 6 },
139
        .p1 = { .min = 1, .max = 6 },
133
	.p2 = { .dot_limit = 165000,
140
	.p2 = { .dot_limit = 165000,
134
		.p2_slow = 14, .p2_fast = 7 },
141
		.p2_slow = 14, .p2_fast = 7 },
135
};
142
};
136
 
143
 
137
static const intel_limit_t intel_limits_i9xx_sdvo = {
144
static const intel_limit_t intel_limits_i9xx_sdvo = {
138
        .dot = { .min = 20000, .max = 400000 },
145
        .dot = { .min = 20000, .max = 400000 },
139
        .vco = { .min = 1400000, .max = 2800000 },
146
        .vco = { .min = 1400000, .max = 2800000 },
140
        .n = { .min = 1, .max = 6 },
147
        .n = { .min = 1, .max = 6 },
141
        .m = { .min = 70, .max = 120 },
148
        .m = { .min = 70, .max = 120 },
142
	.m1 = { .min = 8, .max = 18 },
149
	.m1 = { .min = 8, .max = 18 },
143
	.m2 = { .min = 3, .max = 7 },
150
	.m2 = { .min = 3, .max = 7 },
144
        .p = { .min = 5, .max = 80 },
151
        .p = { .min = 5, .max = 80 },
145
        .p1 = { .min = 1, .max = 8 },
152
        .p1 = { .min = 1, .max = 8 },
146
	.p2 = { .dot_limit = 200000,
153
	.p2 = { .dot_limit = 200000,
147
		.p2_slow = 10, .p2_fast = 5 },
154
		.p2_slow = 10, .p2_fast = 5 },
148
};
155
};
149
 
156
 
150
static const intel_limit_t intel_limits_i9xx_lvds = {
157
static const intel_limit_t intel_limits_i9xx_lvds = {
151
        .dot = { .min = 20000, .max = 400000 },
158
        .dot = { .min = 20000, .max = 400000 },
152
        .vco = { .min = 1400000, .max = 2800000 },
159
        .vco = { .min = 1400000, .max = 2800000 },
153
        .n = { .min = 1, .max = 6 },
160
        .n = { .min = 1, .max = 6 },
154
        .m = { .min = 70, .max = 120 },
161
        .m = { .min = 70, .max = 120 },
155
	.m1 = { .min = 8, .max = 18 },
162
	.m1 = { .min = 8, .max = 18 },
156
	.m2 = { .min = 3, .max = 7 },
163
	.m2 = { .min = 3, .max = 7 },
157
        .p = { .min = 7, .max = 98 },
164
        .p = { .min = 7, .max = 98 },
158
        .p1 = { .min = 1, .max = 8 },
165
        .p1 = { .min = 1, .max = 8 },
159
	.p2 = { .dot_limit = 112000,
166
	.p2 = { .dot_limit = 112000,
160
		.p2_slow = 14, .p2_fast = 7 },
167
		.p2_slow = 14, .p2_fast = 7 },
161
};
168
};
162
 
169
 
163
 
170
 
164
static const intel_limit_t intel_limits_g4x_sdvo = {
171
static const intel_limit_t intel_limits_g4x_sdvo = {
165
	.dot = { .min = 25000, .max = 270000 },
172
	.dot = { .min = 25000, .max = 270000 },
166
	.vco = { .min = 1750000, .max = 3500000},
173
	.vco = { .min = 1750000, .max = 3500000},
167
	.n = { .min = 1, .max = 4 },
174
	.n = { .min = 1, .max = 4 },
168
	.m = { .min = 104, .max = 138 },
175
	.m = { .min = 104, .max = 138 },
169
	.m1 = { .min = 17, .max = 23 },
176
	.m1 = { .min = 17, .max = 23 },
170
	.m2 = { .min = 5, .max = 11 },
177
	.m2 = { .min = 5, .max = 11 },
171
	.p = { .min = 10, .max = 30 },
178
	.p = { .min = 10, .max = 30 },
172
	.p1 = { .min = 1, .max = 3},
179
	.p1 = { .min = 1, .max = 3},
173
	.p2 = { .dot_limit = 270000,
180
	.p2 = { .dot_limit = 270000,
174
		.p2_slow = 10,
181
		.p2_slow = 10,
175
		.p2_fast = 10
182
		.p2_fast = 10
176
	},
183
	},
177
};
184
};
178
 
185
 
179
static const intel_limit_t intel_limits_g4x_hdmi = {
186
static const intel_limit_t intel_limits_g4x_hdmi = {
180
	.dot = { .min = 22000, .max = 400000 },
187
	.dot = { .min = 22000, .max = 400000 },
181
	.vco = { .min = 1750000, .max = 3500000},
188
	.vco = { .min = 1750000, .max = 3500000},
182
	.n = { .min = 1, .max = 4 },
189
	.n = { .min = 1, .max = 4 },
183
	.m = { .min = 104, .max = 138 },
190
	.m = { .min = 104, .max = 138 },
184
	.m1 = { .min = 16, .max = 23 },
191
	.m1 = { .min = 16, .max = 23 },
185
	.m2 = { .min = 5, .max = 11 },
192
	.m2 = { .min = 5, .max = 11 },
186
	.p = { .min = 5, .max = 80 },
193
	.p = { .min = 5, .max = 80 },
187
	.p1 = { .min = 1, .max = 8},
194
	.p1 = { .min = 1, .max = 8},
188
	.p2 = { .dot_limit = 165000,
195
	.p2 = { .dot_limit = 165000,
189
		.p2_slow = 10, .p2_fast = 5 },
196
		.p2_slow = 10, .p2_fast = 5 },
190
};
197
};
191
 
198
 
192
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
199
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
193
	.dot = { .min = 20000, .max = 115000 },
200
	.dot = { .min = 20000, .max = 115000 },
194
	.vco = { .min = 1750000, .max = 3500000 },
201
	.vco = { .min = 1750000, .max = 3500000 },
195
	.n = { .min = 1, .max = 3 },
202
	.n = { .min = 1, .max = 3 },
196
	.m = { .min = 104, .max = 138 },
203
	.m = { .min = 104, .max = 138 },
197
	.m1 = { .min = 17, .max = 23 },
204
	.m1 = { .min = 17, .max = 23 },
198
	.m2 = { .min = 5, .max = 11 },
205
	.m2 = { .min = 5, .max = 11 },
199
	.p = { .min = 28, .max = 112 },
206
	.p = { .min = 28, .max = 112 },
200
	.p1 = { .min = 2, .max = 8 },
207
	.p1 = { .min = 2, .max = 8 },
201
	.p2 = { .dot_limit = 0,
208
	.p2 = { .dot_limit = 0,
202
		.p2_slow = 14, .p2_fast = 14
209
		.p2_slow = 14, .p2_fast = 14
203
	},
210
	},
204
};
211
};
205
 
212
 
206
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
213
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
207
	.dot = { .min = 80000, .max = 224000 },
214
	.dot = { .min = 80000, .max = 224000 },
208
	.vco = { .min = 1750000, .max = 3500000 },
215
	.vco = { .min = 1750000, .max = 3500000 },
209
	.n = { .min = 1, .max = 3 },
216
	.n = { .min = 1, .max = 3 },
210
	.m = { .min = 104, .max = 138 },
217
	.m = { .min = 104, .max = 138 },
211
	.m1 = { .min = 17, .max = 23 },
218
	.m1 = { .min = 17, .max = 23 },
212
	.m2 = { .min = 5, .max = 11 },
219
	.m2 = { .min = 5, .max = 11 },
213
	.p = { .min = 14, .max = 42 },
220
	.p = { .min = 14, .max = 42 },
214
	.p1 = { .min = 2, .max = 6 },
221
	.p1 = { .min = 2, .max = 6 },
215
	.p2 = { .dot_limit = 0,
222
	.p2 = { .dot_limit = 0,
216
		.p2_slow = 7, .p2_fast = 7
223
		.p2_slow = 7, .p2_fast = 7
217
	},
224
	},
218
};
225
};
219
 
226
 
220
static const intel_limit_t intel_limits_pineview_sdvo = {
227
static const intel_limit_t intel_limits_pineview_sdvo = {
221
        .dot = { .min = 20000, .max = 400000},
228
        .dot = { .min = 20000, .max = 400000},
222
        .vco = { .min = 1700000, .max = 3500000 },
229
        .vco = { .min = 1700000, .max = 3500000 },
223
	/* Pineview's Ncounter is a ring counter */
230
	/* Pineview's Ncounter is a ring counter */
224
        .n = { .min = 3, .max = 6 },
231
        .n = { .min = 3, .max = 6 },
225
        .m = { .min = 2, .max = 256 },
232
        .m = { .min = 2, .max = 256 },
226
	/* Pineview only has one combined m divider, which we treat as m2. */
233
	/* Pineview only has one combined m divider, which we treat as m2. */
227
        .m1 = { .min = 0, .max = 0 },
234
        .m1 = { .min = 0, .max = 0 },
228
        .m2 = { .min = 0, .max = 254 },
235
        .m2 = { .min = 0, .max = 254 },
229
        .p = { .min = 5, .max = 80 },
236
        .p = { .min = 5, .max = 80 },
230
        .p1 = { .min = 1, .max = 8 },
237
        .p1 = { .min = 1, .max = 8 },
231
	.p2 = { .dot_limit = 200000,
238
	.p2 = { .dot_limit = 200000,
232
		.p2_slow = 10, .p2_fast = 5 },
239
		.p2_slow = 10, .p2_fast = 5 },
233
};
240
};
234
 
241
 
235
static const intel_limit_t intel_limits_pineview_lvds = {
242
static const intel_limit_t intel_limits_pineview_lvds = {
236
        .dot = { .min = 20000, .max = 400000 },
243
        .dot = { .min = 20000, .max = 400000 },
237
        .vco = { .min = 1700000, .max = 3500000 },
244
        .vco = { .min = 1700000, .max = 3500000 },
238
        .n = { .min = 3, .max = 6 },
245
        .n = { .min = 3, .max = 6 },
239
        .m = { .min = 2, .max = 256 },
246
        .m = { .min = 2, .max = 256 },
240
        .m1 = { .min = 0, .max = 0 },
247
        .m1 = { .min = 0, .max = 0 },
241
        .m2 = { .min = 0, .max = 254 },
248
        .m2 = { .min = 0, .max = 254 },
242
        .p = { .min = 7, .max = 112 },
249
        .p = { .min = 7, .max = 112 },
243
        .p1 = { .min = 1, .max = 8 },
250
        .p1 = { .min = 1, .max = 8 },
244
	.p2 = { .dot_limit = 112000,
251
	.p2 = { .dot_limit = 112000,
245
		.p2_slow = 14, .p2_fast = 14 },
252
		.p2_slow = 14, .p2_fast = 14 },
246
};
253
};
247
 
254
 
248
/* Ironlake / Sandybridge
255
/* Ironlake / Sandybridge
249
 *
256
 *
250
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
257
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
251
 * the range value for them is (actual_value - 2).
258
 * the range value for them is (actual_value - 2).
252
 */
259
 */
253
static const intel_limit_t intel_limits_ironlake_dac = {
260
static const intel_limit_t intel_limits_ironlake_dac = {
254
	.dot = { .min = 25000, .max = 350000 },
261
	.dot = { .min = 25000, .max = 350000 },
255
	.vco = { .min = 1760000, .max = 3510000 },
262
	.vco = { .min = 1760000, .max = 3510000 },
256
	.n = { .min = 1, .max = 5 },
263
	.n = { .min = 1, .max = 5 },
257
	.m = { .min = 79, .max = 127 },
264
	.m = { .min = 79, .max = 127 },
258
	.m1 = { .min = 12, .max = 22 },
265
	.m1 = { .min = 12, .max = 22 },
259
	.m2 = { .min = 5, .max = 9 },
266
	.m2 = { .min = 5, .max = 9 },
260
	.p = { .min = 5, .max = 80 },
267
	.p = { .min = 5, .max = 80 },
261
	.p1 = { .min = 1, .max = 8 },
268
	.p1 = { .min = 1, .max = 8 },
262
	.p2 = { .dot_limit = 225000,
269
	.p2 = { .dot_limit = 225000,
263
		.p2_slow = 10, .p2_fast = 5 },
270
		.p2_slow = 10, .p2_fast = 5 },
264
};
271
};
265
 
272
 
266
static const intel_limit_t intel_limits_ironlake_single_lvds = {
273
static const intel_limit_t intel_limits_ironlake_single_lvds = {
267
	.dot = { .min = 25000, .max = 350000 },
274
	.dot = { .min = 25000, .max = 350000 },
268
	.vco = { .min = 1760000, .max = 3510000 },
275
	.vco = { .min = 1760000, .max = 3510000 },
269
	.n = { .min = 1, .max = 3 },
276
	.n = { .min = 1, .max = 3 },
270
	.m = { .min = 79, .max = 118 },
277
	.m = { .min = 79, .max = 118 },
271
	.m1 = { .min = 12, .max = 22 },
278
	.m1 = { .min = 12, .max = 22 },
272
	.m2 = { .min = 5, .max = 9 },
279
	.m2 = { .min = 5, .max = 9 },
273
	.p = { .min = 28, .max = 112 },
280
	.p = { .min = 28, .max = 112 },
274
	.p1 = { .min = 2, .max = 8 },
281
	.p1 = { .min = 2, .max = 8 },
275
	.p2 = { .dot_limit = 225000,
282
	.p2 = { .dot_limit = 225000,
276
		.p2_slow = 14, .p2_fast = 14 },
283
		.p2_slow = 14, .p2_fast = 14 },
277
};
284
};
278
 
285
 
279
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
286
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
280
	.dot = { .min = 25000, .max = 350000 },
287
	.dot = { .min = 25000, .max = 350000 },
281
	.vco = { .min = 1760000, .max = 3510000 },
288
	.vco = { .min = 1760000, .max = 3510000 },
282
	.n = { .min = 1, .max = 3 },
289
	.n = { .min = 1, .max = 3 },
283
	.m = { .min = 79, .max = 127 },
290
	.m = { .min = 79, .max = 127 },
284
	.m1 = { .min = 12, .max = 22 },
291
	.m1 = { .min = 12, .max = 22 },
285
	.m2 = { .min = 5, .max = 9 },
292
	.m2 = { .min = 5, .max = 9 },
286
	.p = { .min = 14, .max = 56 },
293
	.p = { .min = 14, .max = 56 },
287
	.p1 = { .min = 2, .max = 8 },
294
	.p1 = { .min = 2, .max = 8 },
288
	.p2 = { .dot_limit = 225000,
295
	.p2 = { .dot_limit = 225000,
289
		.p2_slow = 7, .p2_fast = 7 },
296
		.p2_slow = 7, .p2_fast = 7 },
290
};
297
};
291
 
298
 
292
/* LVDS 100mhz refclk limits. */
299
/* LVDS 100mhz refclk limits. */
293
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
300
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
294
	.dot = { .min = 25000, .max = 350000 },
301
	.dot = { .min = 25000, .max = 350000 },
295
	.vco = { .min = 1760000, .max = 3510000 },
302
	.vco = { .min = 1760000, .max = 3510000 },
296
	.n = { .min = 1, .max = 2 },
303
	.n = { .min = 1, .max = 2 },
297
	.m = { .min = 79, .max = 126 },
304
	.m = { .min = 79, .max = 126 },
298
	.m1 = { .min = 12, .max = 22 },
305
	.m1 = { .min = 12, .max = 22 },
299
	.m2 = { .min = 5, .max = 9 },
306
	.m2 = { .min = 5, .max = 9 },
300
	.p = { .min = 28, .max = 112 },
307
	.p = { .min = 28, .max = 112 },
301
	.p1 = { .min = 2, .max = 8 },
308
	.p1 = { .min = 2, .max = 8 },
302
	.p2 = { .dot_limit = 225000,
309
	.p2 = { .dot_limit = 225000,
303
		.p2_slow = 14, .p2_fast = 14 },
310
		.p2_slow = 14, .p2_fast = 14 },
304
};
311
};
305
 
312
 
306
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
313
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
307
	.dot = { .min = 25000, .max = 350000 },
314
	.dot = { .min = 25000, .max = 350000 },
308
	.vco = { .min = 1760000, .max = 3510000 },
315
	.vco = { .min = 1760000, .max = 3510000 },
309
	.n = { .min = 1, .max = 3 },
316
	.n = { .min = 1, .max = 3 },
310
	.m = { .min = 79, .max = 126 },
317
	.m = { .min = 79, .max = 126 },
311
	.m1 = { .min = 12, .max = 22 },
318
	.m1 = { .min = 12, .max = 22 },
312
	.m2 = { .min = 5, .max = 9 },
319
	.m2 = { .min = 5, .max = 9 },
313
	.p = { .min = 14, .max = 42 },
320
	.p = { .min = 14, .max = 42 },
314
	.p1 = { .min = 2, .max = 6 },
321
	.p1 = { .min = 2, .max = 6 },
315
	.p2 = { .dot_limit = 225000,
322
	.p2 = { .dot_limit = 225000,
316
		.p2_slow = 7, .p2_fast = 7 },
323
		.p2_slow = 7, .p2_fast = 7 },
317
};
324
};
318
 
325
 
-
 
326
static const intel_limit_t intel_limits_vlv = {
319
static const intel_limit_t intel_limits_vlv_dac = {
327
	 /*
320
	.dot = { .min = 25000, .max = 270000 },
328
	  * These are the data rate limits (measured in fast clocks)
321
	.vco = { .min = 4000000, .max = 6000000 },
-
 
322
	.n = { .min = 1, .max = 7 },
329
	  * since those are the strictest limits we have. The fast
323
	.m = { .min = 22, .max = 450 }, /* guess */
-
 
324
	.m1 = { .min = 2, .max = 3 },
-
 
325
	.m2 = { .min = 11, .max = 156 },
330
	  * clock and actual rate limits are more relaxed, so checking
326
	.p = { .min = 10, .max = 30 },
-
 
327
	.p1 = { .min = 1, .max = 3 },
-
 
328
	.p2 = { .dot_limit = 270000,
-
 
329
		.p2_slow = 2, .p2_fast = 20 },
331
	  * them would make no difference.
330
};
-
 
331
 
-
 
332
static const intel_limit_t intel_limits_vlv_hdmi = {
332
	  */
333
	.dot = { .min = 25000, .max = 270000 },
333
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
334
	.vco = { .min = 4000000, .max = 6000000 },
334
	.vco = { .min = 4000000, .max = 6000000 },
335
	.n = { .min = 1, .max = 7 },
-
 
336
	.m = { .min = 60, .max = 300 }, /* guess */
335
	.n = { .min = 1, .max = 7 },
337
	.m1 = { .min = 2, .max = 3 },
336
	.m1 = { .min = 2, .max = 3 },
338
	.m2 = { .min = 11, .max = 156 },
-
 
339
	.p = { .min = 10, .max = 30 },
337
	.m2 = { .min = 11, .max = 156 },
340
	.p1 = { .min = 2, .max = 3 },
-
 
341
	.p2 = { .dot_limit = 270000,
338
	.p1 = { .min = 2, .max = 3 },
342
		.p2_slow = 2, .p2_fast = 20 },
339
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
343
};
340
};
-
 
341
 
344
 
342
static void vlv_clock(int refclk, intel_clock_t *clock)
345
static const intel_limit_t intel_limits_vlv_dp = {
343
{
346
	.dot = { .min = 25000, .max = 270000 },
344
	clock->m = clock->m1 * clock->m2;
-
 
345
	clock->p = clock->p1 * clock->p2;
347
	.vco = { .min = 4000000, .max = 6000000 },
346
	if (WARN_ON(clock->n == 0 || clock->p == 0))
348
	.n = { .min = 1, .max = 7 },
347
		return;
-
 
348
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
-
 
349
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
-
 
350
}
-
 
351
 
-
 
352
/**
-
 
353
 * Returns whether any output on the specified pipe is of the specified type
-
 
354
 */
349
	.m = { .min = 22, .max = 450 },
355
static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
350
	.m1 = { .min = 2, .max = 3 },
356
{
-
 
357
	struct drm_device *dev = crtc->dev;
351
	.m2 = { .min = 11, .max = 156 },
358
	struct intel_encoder *encoder;
352
	.p = { .min = 10, .max = 30 },
359
 
353
	.p1 = { .min = 1, .max = 3 },
360
	for_each_encoder_on_crtc(dev, crtc, encoder)
-
 
361
		if (encoder->type == type)
-
 
362
			return true;
354
	.p2 = { .dot_limit = 270000,
363
 
355
		.p2_slow = 2, .p2_fast = 20 },
364
	return false;
356
};
365
}
357
 
366
 
358
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
367
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
359
						int refclk)
368
						int refclk)
360
{
369
{
361
	struct drm_device *dev = crtc->dev;
370
	struct drm_device *dev = crtc->dev;
362
	const intel_limit_t *limit;
371
	const intel_limit_t *limit;
363
 
372
 
364
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
373
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
365
		if (intel_is_dual_link_lvds(dev)) {
374
		if (intel_is_dual_link_lvds(dev)) {
366
			if (refclk == 100000)
375
			if (refclk == 100000)
367
				limit = &intel_limits_ironlake_dual_lvds_100m;
376
				limit = &intel_limits_ironlake_dual_lvds_100m;
368
			else
377
			else
369
				limit = &intel_limits_ironlake_dual_lvds;
378
				limit = &intel_limits_ironlake_dual_lvds;
370
		} else {
379
		} else {
371
			if (refclk == 100000)
380
			if (refclk == 100000)
372
				limit = &intel_limits_ironlake_single_lvds_100m;
381
				limit = &intel_limits_ironlake_single_lvds_100m;
373
			else
382
			else
374
				limit = &intel_limits_ironlake_single_lvds;
383
				limit = &intel_limits_ironlake_single_lvds;
375
		}
384
		}
376
	} else
385
	} else
377
		limit = &intel_limits_ironlake_dac;
386
		limit = &intel_limits_ironlake_dac;
378
 
387
 
379
	return limit;
388
	return limit;
380
}
389
}
381
 
390
 
382
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
391
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
383
{
392
{
384
	struct drm_device *dev = crtc->dev;
393
	struct drm_device *dev = crtc->dev;
385
	const intel_limit_t *limit;
394
	const intel_limit_t *limit;
386
 
395
 
387
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
396
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
388
		if (intel_is_dual_link_lvds(dev))
397
		if (intel_is_dual_link_lvds(dev))
389
			limit = &intel_limits_g4x_dual_channel_lvds;
398
			limit = &intel_limits_g4x_dual_channel_lvds;
390
		else
399
		else
391
			limit = &intel_limits_g4x_single_channel_lvds;
400
			limit = &intel_limits_g4x_single_channel_lvds;
392
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
401
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
393
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
402
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
394
		limit = &intel_limits_g4x_hdmi;
403
		limit = &intel_limits_g4x_hdmi;
395
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
404
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
396
		limit = &intel_limits_g4x_sdvo;
405
		limit = &intel_limits_g4x_sdvo;
397
	} else /* The option is for other outputs */
406
	} else /* The option is for other outputs */
398
		limit = &intel_limits_i9xx_sdvo;
407
		limit = &intel_limits_i9xx_sdvo;
399
 
408
 
400
	return limit;
409
	return limit;
401
}
410
}
402
 
411
 
403
static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
412
static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
404
{
413
{
405
	struct drm_device *dev = crtc->dev;
414
	struct drm_device *dev = crtc->dev;
406
	const intel_limit_t *limit;
415
	const intel_limit_t *limit;
407
 
416
 
408
	if (HAS_PCH_SPLIT(dev))
417
	if (HAS_PCH_SPLIT(dev))
409
		limit = intel_ironlake_limit(crtc, refclk);
418
		limit = intel_ironlake_limit(crtc, refclk);
410
	else if (IS_G4X(dev)) {
419
	else if (IS_G4X(dev)) {
411
		limit = intel_g4x_limit(crtc);
420
		limit = intel_g4x_limit(crtc);
412
	} else if (IS_PINEVIEW(dev)) {
421
	} else if (IS_PINEVIEW(dev)) {
413
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
422
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
414
			limit = &intel_limits_pineview_lvds;
423
			limit = &intel_limits_pineview_lvds;
415
		else
424
		else
416
			limit = &intel_limits_pineview_sdvo;
425
			limit = &intel_limits_pineview_sdvo;
417
	} else if (IS_VALLEYVIEW(dev)) {
426
	} else if (IS_VALLEYVIEW(dev)) {
418
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG))
-
 
419
			limit = &intel_limits_vlv_dac;
-
 
420
		else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
-
 
421
			limit = &intel_limits_vlv_hdmi;
-
 
422
		else
-
 
423
			limit = &intel_limits_vlv_dp;
427
		limit = &intel_limits_vlv;
424
	} else if (!IS_GEN2(dev)) {
428
	} else if (!IS_GEN2(dev)) {
425
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
429
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
426
			limit = &intel_limits_i9xx_lvds;
430
			limit = &intel_limits_i9xx_lvds;
427
		else
431
		else
428
			limit = &intel_limits_i9xx_sdvo;
432
			limit = &intel_limits_i9xx_sdvo;
429
	} else {
433
	} else {
430
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
434
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
431
			limit = &intel_limits_i8xx_lvds;
435
			limit = &intel_limits_i8xx_lvds;
432
		else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
436
		else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
433
			limit = &intel_limits_i8xx_dvo;
437
			limit = &intel_limits_i8xx_dvo;
434
		else
438
		else
435
			limit = &intel_limits_i8xx_dac;
439
			limit = &intel_limits_i8xx_dac;
436
	}
440
	}
437
	return limit;
441
	return limit;
438
}
442
}
439
 
443
 
440
/* m1 is reserved as 0 in Pineview, n is a ring counter */
444
/* m1 is reserved as 0 in Pineview, n is a ring counter */
441
static void pineview_clock(int refclk, intel_clock_t *clock)
445
static void pineview_clock(int refclk, intel_clock_t *clock)
442
{
446
{
443
	clock->m = clock->m2 + 2;
447
	clock->m = clock->m2 + 2;
444
	clock->p = clock->p1 * clock->p2;
448
	clock->p = clock->p1 * clock->p2;
-
 
449
	if (WARN_ON(clock->n == 0 || clock->p == 0))
-
 
450
		return;
445
	clock->vco = refclk * clock->m / clock->n;
451
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
446
	clock->dot = clock->vco / clock->p;
452
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
447
}
453
}
448
 
454
 
449
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
455
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
450
{
456
{
451
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
457
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
452
}
458
}
453
 
459
 
454
static void i9xx_clock(int refclk, intel_clock_t *clock)
460
static void i9xx_clock(int refclk, intel_clock_t *clock)
455
{
461
{
456
	clock->m = i9xx_dpll_compute_m(clock);
462
	clock->m = i9xx_dpll_compute_m(clock);
457
	clock->p = clock->p1 * clock->p2;
463
	clock->p = clock->p1 * clock->p2;
458
	clock->vco = refclk * clock->m / (clock->n + 2);
464
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
459
	clock->dot = clock->vco / clock->p;
-
 
460
}
-
 
461
 
-
 
462
/**
-
 
463
 * Returns whether any output on the specified pipe is of the specified type
-
 
464
 */
465
		return;
465
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
466
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
466
{
-
 
467
	struct drm_device *dev = crtc->dev;
-
 
468
	struct intel_encoder *encoder;
-
 
469
 
-
 
470
	for_each_encoder_on_crtc(dev, crtc, encoder)
467
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
471
		if (encoder->type == type)
-
 
472
			return true;
-
 
473
 
-
 
474
	return false;
-
 
475
}
468
}
476
 
469
 
477
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
470
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
478
/**
471
/**
479
 * Returns whether the given set of divisors are valid for a given refclk with
472
 * Returns whether the given set of divisors are valid for a given refclk with
480
 * the given connectors.
473
 * the given connectors.
481
 */
474
 */
482
 
475
 
483
static bool intel_PLL_is_valid(struct drm_device *dev,
476
static bool intel_PLL_is_valid(struct drm_device *dev,
484
			       const intel_limit_t *limit,
477
			       const intel_limit_t *limit,
485
			       const intel_clock_t *clock)
478
			       const intel_clock_t *clock)
486
{
479
{
-
 
480
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
-
 
481
		INTELPllInvalid("n out of range\n");
487
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
482
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
488
		INTELPllInvalid("p1 out of range\n");
483
		INTELPllInvalid("p1 out of range\n");
489
	if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
-
 
490
		INTELPllInvalid("p out of range\n");
-
 
491
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
484
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
492
		INTELPllInvalid("m2 out of range\n");
485
		INTELPllInvalid("m2 out of range\n");
493
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
486
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
494
		INTELPllInvalid("m1 out of range\n");
487
		INTELPllInvalid("m1 out of range\n");
-
 
488
 
-
 
489
	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
495
	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
490
		if (clock->m1 <= clock->m2)
496
		INTELPllInvalid("m1 <= m2\n");
491
		INTELPllInvalid("m1 <= m2\n");
-
 
492
 
-
 
493
	if (!IS_VALLEYVIEW(dev)) {
-
 
494
		if (clock->p < limit->p.min || limit->p.max < clock->p)
-
 
495
			INTELPllInvalid("p out of range\n");
497
	if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
496
	if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
498
		INTELPllInvalid("m out of range\n");
497
		INTELPllInvalid("m out of range\n");
499
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
-
 
500
		INTELPllInvalid("n out of range\n");
-
 
-
 
498
	}
-
 
499
 
501
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
500
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
502
		INTELPllInvalid("vco out of range\n");
501
		INTELPllInvalid("vco out of range\n");
503
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
502
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
504
	 * connector, etc., rather than just a single range.
503
	 * connector, etc., rather than just a single range.
505
	 */
504
	 */
506
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
505
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
507
		INTELPllInvalid("dot out of range\n");
506
		INTELPllInvalid("dot out of range\n");
508
 
507
 
509
	return true;
508
	return true;
510
}
509
}
511
 
510
 
512
static bool
511
static bool
513
i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
512
i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
514
		    int target, int refclk, intel_clock_t *match_clock,
513
		    int target, int refclk, intel_clock_t *match_clock,
515
		    intel_clock_t *best_clock)
514
		    intel_clock_t *best_clock)
516
{
515
{
517
	struct drm_device *dev = crtc->dev;
516
	struct drm_device *dev = crtc->dev;
518
	intel_clock_t clock;
517
	intel_clock_t clock;
519
	int err = target;
518
	int err = target;
520
 
519
 
521
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
520
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
522
		/*
521
		/*
523
		 * For LVDS just rely on its current settings for dual-channel.
522
		 * For LVDS just rely on its current settings for dual-channel.
524
		 * We haven't figured out how to reliably set up different
523
		 * We haven't figured out how to reliably set up different
525
		 * single/dual channel state, if we even can.
524
		 * single/dual channel state, if we even can.
526
		 */
525
		 */
527
		if (intel_is_dual_link_lvds(dev))
526
		if (intel_is_dual_link_lvds(dev))
528
			clock.p2 = limit->p2.p2_fast;
527
			clock.p2 = limit->p2.p2_fast;
529
		else
528
		else
530
			clock.p2 = limit->p2.p2_slow;
529
			clock.p2 = limit->p2.p2_slow;
531
	} else {
530
	} else {
532
		if (target < limit->p2.dot_limit)
531
		if (target < limit->p2.dot_limit)
533
			clock.p2 = limit->p2.p2_slow;
532
			clock.p2 = limit->p2.p2_slow;
534
		else
533
		else
535
			clock.p2 = limit->p2.p2_fast;
534
			clock.p2 = limit->p2.p2_fast;
536
	}
535
	}
537
 
536
 
538
	memset(best_clock, 0, sizeof(*best_clock));
537
	memset(best_clock, 0, sizeof(*best_clock));
539
 
538
 
540
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
539
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
541
	     clock.m1++) {
540
	     clock.m1++) {
542
		for (clock.m2 = limit->m2.min;
541
		for (clock.m2 = limit->m2.min;
543
		     clock.m2 <= limit->m2.max; clock.m2++) {
542
		     clock.m2 <= limit->m2.max; clock.m2++) {
544
			if (clock.m2 >= clock.m1)
543
			if (clock.m2 >= clock.m1)
545
				break;
544
				break;
546
			for (clock.n = limit->n.min;
545
			for (clock.n = limit->n.min;
547
			     clock.n <= limit->n.max; clock.n++) {
546
			     clock.n <= limit->n.max; clock.n++) {
548
				for (clock.p1 = limit->p1.min;
547
				for (clock.p1 = limit->p1.min;
549
					clock.p1 <= limit->p1.max; clock.p1++) {
548
					clock.p1 <= limit->p1.max; clock.p1++) {
550
					int this_err;
549
					int this_err;
551
 
550
 
552
					i9xx_clock(refclk, &clock);
551
					i9xx_clock(refclk, &clock);
553
					if (!intel_PLL_is_valid(dev, limit,
552
					if (!intel_PLL_is_valid(dev, limit,
554
								&clock))
553
								&clock))
555
						continue;
554
						continue;
556
					if (match_clock &&
555
					if (match_clock &&
557
					    clock.p != match_clock->p)
556
					    clock.p != match_clock->p)
558
						continue;
557
						continue;
559
 
558
 
560
					this_err = abs(clock.dot - target);
559
					this_err = abs(clock.dot - target);
561
					if (this_err < err) {
560
					if (this_err < err) {
562
						*best_clock = clock;
561
						*best_clock = clock;
563
						err = this_err;
562
						err = this_err;
564
					}
563
					}
565
				}
564
				}
566
			}
565
			}
567
		}
566
		}
568
	}
567
	}
569
 
568
 
570
	return (err != target);
569
	return (err != target);
571
}
570
}
572
 
571
 
573
static bool
572
static bool
574
pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
573
pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
575
		   int target, int refclk, intel_clock_t *match_clock,
574
		   int target, int refclk, intel_clock_t *match_clock,
576
		   intel_clock_t *best_clock)
575
		   intel_clock_t *best_clock)
577
{
576
{
578
	struct drm_device *dev = crtc->dev;
577
	struct drm_device *dev = crtc->dev;
579
	intel_clock_t clock;
578
	intel_clock_t clock;
580
	int err = target;
579
	int err = target;
581
 
580
 
582
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
581
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
583
		/*
582
		/*
584
		 * For LVDS just rely on its current settings for dual-channel.
583
		 * For LVDS just rely on its current settings for dual-channel.
585
		 * We haven't figured out how to reliably set up different
584
		 * We haven't figured out how to reliably set up different
586
		 * single/dual channel state, if we even can.
585
		 * single/dual channel state, if we even can.
587
		 */
586
		 */
588
		if (intel_is_dual_link_lvds(dev))
587
		if (intel_is_dual_link_lvds(dev))
589
			clock.p2 = limit->p2.p2_fast;
588
			clock.p2 = limit->p2.p2_fast;
590
		else
589
		else
591
			clock.p2 = limit->p2.p2_slow;
590
			clock.p2 = limit->p2.p2_slow;
592
	} else {
591
	} else {
593
		if (target < limit->p2.dot_limit)
592
		if (target < limit->p2.dot_limit)
594
			clock.p2 = limit->p2.p2_slow;
593
			clock.p2 = limit->p2.p2_slow;
595
		else
594
		else
596
			clock.p2 = limit->p2.p2_fast;
595
			clock.p2 = limit->p2.p2_fast;
597
	}
596
	}
598
 
597
 
599
	memset(best_clock, 0, sizeof(*best_clock));
598
	memset(best_clock, 0, sizeof(*best_clock));
600
 
599
 
601
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
600
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
602
	     clock.m1++) {
601
	     clock.m1++) {
603
		for (clock.m2 = limit->m2.min;
602
		for (clock.m2 = limit->m2.min;
604
		     clock.m2 <= limit->m2.max; clock.m2++) {
603
		     clock.m2 <= limit->m2.max; clock.m2++) {
605
			for (clock.n = limit->n.min;
604
			for (clock.n = limit->n.min;
606
			     clock.n <= limit->n.max; clock.n++) {
605
			     clock.n <= limit->n.max; clock.n++) {
607
				for (clock.p1 = limit->p1.min;
606
				for (clock.p1 = limit->p1.min;
608
					clock.p1 <= limit->p1.max; clock.p1++) {
607
					clock.p1 <= limit->p1.max; clock.p1++) {
609
					int this_err;
608
					int this_err;
610
 
609
 
611
					pineview_clock(refclk, &clock);
610
					pineview_clock(refclk, &clock);
612
					if (!intel_PLL_is_valid(dev, limit,
611
					if (!intel_PLL_is_valid(dev, limit,
613
								&clock))
612
								&clock))
614
						continue;
613
						continue;
615
					if (match_clock &&
614
					if (match_clock &&
616
					    clock.p != match_clock->p)
615
					    clock.p != match_clock->p)
617
						continue;
616
						continue;
618
 
617
 
619
					this_err = abs(clock.dot - target);
618
					this_err = abs(clock.dot - target);
620
					if (this_err < err) {
619
					if (this_err < err) {
621
						*best_clock = clock;
620
						*best_clock = clock;
622
						err = this_err;
621
						err = this_err;
623
					}
622
					}
624
				}
623
				}
625
			}
624
			}
626
		}
625
		}
627
	}
626
	}
628
 
627
 
629
	return (err != target);
628
	return (err != target);
630
}
629
}
631
 
630
 
632
static bool
631
static bool
633
g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
632
g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
634
			int target, int refclk, intel_clock_t *match_clock,
633
			int target, int refclk, intel_clock_t *match_clock,
635
			intel_clock_t *best_clock)
634
			intel_clock_t *best_clock)
636
{
635
{
637
	struct drm_device *dev = crtc->dev;
636
	struct drm_device *dev = crtc->dev;
638
	intel_clock_t clock;
637
	intel_clock_t clock;
639
	int max_n;
638
	int max_n;
640
	bool found;
639
	bool found;
641
	/* approximately equals target * 0.00585 */
640
	/* approximately equals target * 0.00585 */
642
	int err_most = (target >> 8) + (target >> 9);
641
	int err_most = (target >> 8) + (target >> 9);
643
	found = false;
642
	found = false;
644
 
643
 
645
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
644
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
646
		if (intel_is_dual_link_lvds(dev))
645
		if (intel_is_dual_link_lvds(dev))
647
			clock.p2 = limit->p2.p2_fast;
646
			clock.p2 = limit->p2.p2_fast;
648
		else
647
		else
649
			clock.p2 = limit->p2.p2_slow;
648
			clock.p2 = limit->p2.p2_slow;
650
	} else {
649
	} else {
651
		if (target < limit->p2.dot_limit)
650
		if (target < limit->p2.dot_limit)
652
			clock.p2 = limit->p2.p2_slow;
651
			clock.p2 = limit->p2.p2_slow;
653
		else
652
		else
654
			clock.p2 = limit->p2.p2_fast;
653
			clock.p2 = limit->p2.p2_fast;
655
	}
654
	}
656
 
655
 
657
	memset(best_clock, 0, sizeof(*best_clock));
656
	memset(best_clock, 0, sizeof(*best_clock));
658
	max_n = limit->n.max;
657
	max_n = limit->n.max;
659
	/* based on hardware requirement, prefer smaller n to precision */
658
	/* based on hardware requirement, prefer smaller n to precision */
660
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
659
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
661
		/* based on hardware requirement, prefere larger m1,m2 */
660
		/* based on hardware requirement, prefere larger m1,m2 */
662
		for (clock.m1 = limit->m1.max;
661
		for (clock.m1 = limit->m1.max;
663
		     clock.m1 >= limit->m1.min; clock.m1--) {
662
		     clock.m1 >= limit->m1.min; clock.m1--) {
664
			for (clock.m2 = limit->m2.max;
663
			for (clock.m2 = limit->m2.max;
665
			     clock.m2 >= limit->m2.min; clock.m2--) {
664
			     clock.m2 >= limit->m2.min; clock.m2--) {
666
				for (clock.p1 = limit->p1.max;
665
				for (clock.p1 = limit->p1.max;
667
				     clock.p1 >= limit->p1.min; clock.p1--) {
666
				     clock.p1 >= limit->p1.min; clock.p1--) {
668
					int this_err;
667
					int this_err;
669
 
668
 
670
					i9xx_clock(refclk, &clock);
669
					i9xx_clock(refclk, &clock);
671
					if (!intel_PLL_is_valid(dev, limit,
670
					if (!intel_PLL_is_valid(dev, limit,
672
								&clock))
671
								&clock))
673
						continue;
672
						continue;
674
 
673
 
675
					this_err = abs(clock.dot - target);
674
					this_err = abs(clock.dot - target);
676
					if (this_err < err_most) {
675
					if (this_err < err_most) {
677
						*best_clock = clock;
676
						*best_clock = clock;
678
						err_most = this_err;
677
						err_most = this_err;
679
						max_n = clock.n;
678
						max_n = clock.n;
680
						found = true;
679
						found = true;
681
					}
680
					}
682
				}
681
				}
683
			}
682
			}
684
		}
683
		}
685
	}
684
	}
686
	return found;
685
	return found;
687
}
686
}
688
 
687
 
689
static bool
688
static bool
690
vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
689
vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
691
			int target, int refclk, intel_clock_t *match_clock,
690
			int target, int refclk, intel_clock_t *match_clock,
692
			intel_clock_t *best_clock)
691
			intel_clock_t *best_clock)
693
{
692
{
694
	u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
693
	struct drm_device *dev = crtc->dev;
695
	u32 m, n, fastclk;
694
	intel_clock_t clock;
-
 
695
	unsigned int bestppm = 1000000;
696
	u32 updrate, minupdate, p;
696
	/* min update 19.2 MHz */
697
	unsigned long bestppm, ppm, absppm;
697
	int max_n = min(limit->n.max, refclk / 19200);
698
	int dotclk, flag;
698
	bool found = false;
699
 
699
 
700
	flag = 0;
-
 
701
	dotclk = target * 1000;
-
 
702
	bestppm = 1000000;
-
 
703
	ppm = absppm = 0;
-
 
704
	fastclk = dotclk / (2*100);
700
	target *= 5; /* fast clock */
705
	updrate = 0;
701
 
706
	minupdate = 19200;
-
 
707
	n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
-
 
708
	bestm1 = bestm2 = bestp1 = bestp2 = 0;
702
	memset(best_clock, 0, sizeof(*best_clock));
709
 
703
 
710
	/* based on hardware requirement, prefer smaller n to precision */
704
	/* based on hardware requirement, prefer smaller n to precision */
711
	for (n = limit->n.min; n <= ((refclk) / minupdate); n++) {
-
 
712
		updrate = refclk / n;
705
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
713
		for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) {
706
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
714
			for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) {
-
 
715
				if (p2 > 10)
707
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
716
					p2 = p2 - 1;
708
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
717
				p = p1 * p2;
709
				clock.p = clock.p1 * clock.p2;
718
				/* based on hardware requirement, prefer bigger m1,m2 values */
710
				/* based on hardware requirement, prefer bigger m1,m2 values */
719
				for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) {
711
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
-
 
712
					unsigned int ppm, diff;
720
					m2 = (((2*(fastclk * p * n / m1 )) +
713
 
721
					       refclk) / (2*refclk));
714
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
-
 
715
								     refclk * clock.m1);
722
					m = m1 * m2;
716
 
-
 
717
					vlv_clock(refclk, &clock);
723
					vco = updrate * m;
718
 
-
 
719
					if (!intel_PLL_is_valid(dev, limit,
-
 
720
								&clock))
-
 
721
						continue;
724
					if (vco >= limit->vco.min && vco < limit->vco.max) {
722
 
725
						ppm = 1000000 * ((vco / p) - fastclk) / fastclk;
723
					diff = abs(clock.dot - target);
-
 
724
					ppm = div_u64(1000000ULL * diff, target);
726
						absppm = (ppm > 0) ? ppm : (-ppm);
725
 
727
						if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) {
726
					if (ppm < 100 && clock.p > best_clock->p) {
728
							bestppm = 0;
-
 
729
							flag = 1;
-
 
730
						}
-
 
731
						if (absppm < bestppm - 10) {
727
							bestppm = 0;
732
							bestppm = absppm;
728
						*best_clock = clock;
733
							flag = 1;
729
						found = true;
734
						}
-
 
735
						if (flag) {
-
 
-
 
730
						}
736
							bestn = n;
731
 
737
							bestm1 = m1;
-
 
738
							bestm2 = m2;
732
					if (bestppm >= 10 && ppm < bestppm - 10) {
739
							bestp1 = p1;
733
						bestppm = ppm;
740
							bestp2 = p2;
734
						*best_clock = clock;
741
							flag = 0;
735
						found = true;
742
						}
736
						}
743
					}
737
						}
744
				}
738
					}
745
			}
739
				}
746
		}
740
			}
-
 
741
 
-
 
742
	return found;
747
	}
743
}
748
	best_clock->n = bestn;
-
 
749
	best_clock->m1 = bestm1;
-
 
750
	best_clock->m2 = bestm2;
-
 
751
	best_clock->p1 = bestp1;
-
 
752
	best_clock->p2 = bestp2;
-
 
-
 
744
 
-
 
745
bool intel_crtc_active(struct drm_crtc *crtc)
-
 
746
{
-
 
747
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
 
748
 
-
 
749
	/* Be paranoid as we can arrive here with only partial
-
 
750
	 * state retrieved from the hardware during setup.
-
 
751
	 *
-
 
752
	 * We can ditch the adjusted_mode.crtc_clock check as soon
-
 
753
	 * as Haswell has gained clock readout/fastboot support.
-
 
754
	 *
-
 
755
	 * We can ditch the crtc->fb check as soon as we can
-
 
756
	 * properly reconstruct framebuffers.
753
 
757
	 */
-
 
758
	return intel_crtc->active && crtc->fb &&
754
	return true;
759
		intel_crtc->config.adjusted_mode.crtc_clock;
755
}
760
}
756
 
761
 
757
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
762
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
758
					     enum pipe pipe)
763
					     enum pipe pipe)
759
{
764
{
760
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
765
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
761
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
766
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
762
 
767
 
763
	return intel_crtc->config.cpu_transcoder;
768
	return intel_crtc->config.cpu_transcoder;
764
}
769
}
765
 
770
 
766
static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
771
static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
767
{
772
{
768
	struct drm_i915_private *dev_priv = dev->dev_private;
773
	struct drm_i915_private *dev_priv = dev->dev_private;
769
	u32 frame, frame_reg = PIPEFRAME(pipe);
774
	u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);
770
 
775
 
771
	frame = I915_READ(frame_reg);
776
	frame = I915_READ(frame_reg);
772
 
777
 
773
	if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
778
	if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
774
		DRM_DEBUG_KMS("vblank wait timed out\n");
779
		DRM_DEBUG_KMS("vblank wait timed out\n");
775
}
780
}
776
 
781
 
777
/**
782
/**
778
 * intel_wait_for_vblank - wait for vblank on a given pipe
783
 * intel_wait_for_vblank - wait for vblank on a given pipe
779
 * @dev: drm device
784
 * @dev: drm device
780
 * @pipe: pipe to wait for
785
 * @pipe: pipe to wait for
781
 *
786
 *
782
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
787
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
783
 * mode setting code.
788
 * mode setting code.
784
 */
789
 */
785
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
790
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
786
{
791
{
787
	struct drm_i915_private *dev_priv = dev->dev_private;
792
	struct drm_i915_private *dev_priv = dev->dev_private;
788
	int pipestat_reg = PIPESTAT(pipe);
793
	int pipestat_reg = PIPESTAT(pipe);
789
 
794
 
790
	if (INTEL_INFO(dev)->gen >= 5) {
795
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
791
		ironlake_wait_for_vblank(dev, pipe);
796
		g4x_wait_for_vblank(dev, pipe);
792
		return;
797
		return;
793
	}
798
	}
794
 
799
 
795
	/* Clear existing vblank status. Note this will clear any other
800
	/* Clear existing vblank status. Note this will clear any other
796
	 * sticky status fields as well.
801
	 * sticky status fields as well.
797
	 *
802
	 *
798
	 * This races with i915_driver_irq_handler() with the result
803
	 * This races with i915_driver_irq_handler() with the result
799
	 * that either function could miss a vblank event.  Here it is not
804
	 * that either function could miss a vblank event.  Here it is not
800
	 * fatal, as we will either wait upon the next vblank interrupt or
805
	 * fatal, as we will either wait upon the next vblank interrupt or
801
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
806
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
802
	 * called during modeset at which time the GPU should be idle and
807
	 * called during modeset at which time the GPU should be idle and
803
	 * should *not* be performing page flips and thus not waiting on
808
	 * should *not* be performing page flips and thus not waiting on
804
	 * vblanks...
809
	 * vblanks...
805
	 * Currently, the result of us stealing a vblank from the irq
810
	 * Currently, the result of us stealing a vblank from the irq
806
	 * handler is that a single frame will be skipped during swapbuffers.
811
	 * handler is that a single frame will be skipped during swapbuffers.
807
	 */
812
	 */
808
	I915_WRITE(pipestat_reg,
813
	I915_WRITE(pipestat_reg,
809
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
814
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
810
 
815
 
811
	/* Wait for vblank interrupt bit to set */
816
	/* Wait for vblank interrupt bit to set */
812
	if (wait_for(I915_READ(pipestat_reg) &
817
	if (wait_for(I915_READ(pipestat_reg) &
813
		     PIPE_VBLANK_INTERRUPT_STATUS,
818
		     PIPE_VBLANK_INTERRUPT_STATUS,
814
		     50))
819
		     50))
815
		DRM_DEBUG_KMS("vblank wait timed out\n");
820
		DRM_DEBUG_KMS("vblank wait timed out\n");
816
}
821
}
-
 
822
 
-
 
823
static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
-
 
824
{
-
 
825
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
826
	u32 reg = PIPEDSL(pipe);
-
 
827
	u32 line1, line2;
-
 
828
	u32 line_mask;
-
 
829
 
-
 
830
	if (IS_GEN2(dev))
-
 
831
		line_mask = DSL_LINEMASK_GEN2;
-
 
832
	else
-
 
833
		line_mask = DSL_LINEMASK_GEN3;
-
 
834
 
-
 
835
	line1 = I915_READ(reg) & line_mask;
-
 
836
	mdelay(5);
-
 
837
	line2 = I915_READ(reg) & line_mask;
-
 
838
 
-
 
839
	return line1 == line2;
-
 
840
}
817
 
841
 
818
/*
842
/*
819
 * intel_wait_for_pipe_off - wait for pipe to turn off
843
 * intel_wait_for_pipe_off - wait for pipe to turn off
820
 * @dev: drm device
844
 * @dev: drm device
821
 * @pipe: pipe to wait for
845
 * @pipe: pipe to wait for
822
 *
846
 *
823
 * After disabling a pipe, we can't wait for vblank in the usual way,
847
 * After disabling a pipe, we can't wait for vblank in the usual way,
824
 * spinning on the vblank interrupt status bit, since we won't actually
848
 * spinning on the vblank interrupt status bit, since we won't actually
825
 * see an interrupt when the pipe is disabled.
849
 * see an interrupt when the pipe is disabled.
826
 *
850
 *
827
 * On Gen4 and above:
851
 * On Gen4 and above:
828
 *   wait for the pipe register state bit to turn off
852
 *   wait for the pipe register state bit to turn off
829
 *
853
 *
830
 * Otherwise:
854
 * Otherwise:
831
 *   wait for the display line value to settle (it usually
855
 *   wait for the display line value to settle (it usually
832
 *   ends up stopping at the start of the next frame).
856
 *   ends up stopping at the start of the next frame).
833
 *
857
 *
834
 */
858
 */
835
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
859
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
836
{
860
{
837
	struct drm_i915_private *dev_priv = dev->dev_private;
861
	struct drm_i915_private *dev_priv = dev->dev_private;
838
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
862
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
839
								      pipe);
863
								      pipe);
840
 
864
 
841
	if (INTEL_INFO(dev)->gen >= 4) {
865
	if (INTEL_INFO(dev)->gen >= 4) {
842
		int reg = PIPECONF(cpu_transcoder);
866
		int reg = PIPECONF(cpu_transcoder);
843
 
867
 
844
		/* Wait for the Pipe State to go off */
868
		/* Wait for the Pipe State to go off */
845
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
869
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
846
			     100))
870
			     100))
847
			WARN(1, "pipe_off wait timed out\n");
871
			WARN(1, "pipe_off wait timed out\n");
848
	} else {
872
	} else {
849
		u32 last_line, line_mask;
-
 
850
		int reg = PIPEDSL(pipe);
-
 
851
		unsigned long timeout = GetTimerTicks() + msecs_to_jiffies(100);
-
 
852
 
-
 
853
		if (IS_GEN2(dev))
-
 
854
			line_mask = DSL_LINEMASK_GEN2;
-
 
855
		else
-
 
856
			line_mask = DSL_LINEMASK_GEN3;
-
 
857
 
-
 
858
		/* Wait for the display line to settle */
873
		/* Wait for the display line to settle */
859
		do {
-
 
860
			last_line = I915_READ(reg) & line_mask;
-
 
861
			mdelay(5);
-
 
862
		} while (((I915_READ(reg) & line_mask) != last_line) &&
-
 
863
			 time_after(timeout, GetTimerTicks()));
-
 
864
		if (time_after(GetTimerTicks(), timeout))
874
		if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
865
			WARN(1, "pipe_off wait timed out\n");
875
			WARN(1, "pipe_off wait timed out\n");
866
	}
876
	}
867
}
877
}
868
 
878
 
869
/*
879
/*
870
 * ibx_digital_port_connected - is the specified port connected?
880
 * ibx_digital_port_connected - is the specified port connected?
871
 * @dev_priv: i915 private structure
881
 * @dev_priv: i915 private structure
872
 * @port: the port to test
882
 * @port: the port to test
873
 *
883
 *
874
 * Returns true if @port is connected, false otherwise.
884
 * Returns true if @port is connected, false otherwise.
875
 */
885
 */
876
bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
886
bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
877
				struct intel_digital_port *port)
887
				struct intel_digital_port *port)
878
{
888
{
879
	u32 bit;
889
	u32 bit;
880
 
890
 
881
	if (HAS_PCH_IBX(dev_priv->dev)) {
891
	if (HAS_PCH_IBX(dev_priv->dev)) {
882
		switch(port->port) {
892
		switch(port->port) {
883
		case PORT_B:
893
		case PORT_B:
884
			bit = SDE_PORTB_HOTPLUG;
894
			bit = SDE_PORTB_HOTPLUG;
885
			break;
895
			break;
886
		case PORT_C:
896
		case PORT_C:
887
			bit = SDE_PORTC_HOTPLUG;
897
			bit = SDE_PORTC_HOTPLUG;
888
			break;
898
			break;
889
		case PORT_D:
899
		case PORT_D:
890
			bit = SDE_PORTD_HOTPLUG;
900
			bit = SDE_PORTD_HOTPLUG;
891
			break;
901
			break;
892
		default:
902
		default:
893
			return true;
903
			return true;
894
		}
904
		}
895
	} else {
905
	} else {
896
		switch(port->port) {
906
		switch(port->port) {
897
		case PORT_B:
907
		case PORT_B:
898
			bit = SDE_PORTB_HOTPLUG_CPT;
908
			bit = SDE_PORTB_HOTPLUG_CPT;
899
			break;
909
			break;
900
		case PORT_C:
910
		case PORT_C:
901
			bit = SDE_PORTC_HOTPLUG_CPT;
911
			bit = SDE_PORTC_HOTPLUG_CPT;
902
			break;
912
			break;
903
		case PORT_D:
913
		case PORT_D:
904
			bit = SDE_PORTD_HOTPLUG_CPT;
914
			bit = SDE_PORTD_HOTPLUG_CPT;
905
			break;
915
			break;
906
		default:
916
		default:
907
			return true;
917
			return true;
908
		}
918
		}
909
	}
919
	}
910
 
920
 
911
	return I915_READ(SDEISR) & bit;
921
	return I915_READ(SDEISR) & bit;
912
}
922
}
913
 
923
 
914
static const char *state_string(bool enabled)
924
static const char *state_string(bool enabled)
915
{
925
{
916
	return enabled ? "on" : "off";
926
	return enabled ? "on" : "off";
917
}
927
}
918
 
928
 
919
/* Only for pre-ILK configs */
929
/* Only for pre-ILK configs */
920
void assert_pll(struct drm_i915_private *dev_priv,
930
void assert_pll(struct drm_i915_private *dev_priv,
921
		       enum pipe pipe, bool state)
931
		       enum pipe pipe, bool state)
922
{
932
{
923
	int reg;
933
	int reg;
924
	u32 val;
934
	u32 val;
925
	bool cur_state;
935
	bool cur_state;
926
 
936
 
927
	reg = DPLL(pipe);
937
	reg = DPLL(pipe);
928
	val = I915_READ(reg);
938
	val = I915_READ(reg);
929
	cur_state = !!(val & DPLL_VCO_ENABLE);
939
	cur_state = !!(val & DPLL_VCO_ENABLE);
930
	WARN(cur_state != state,
940
	WARN(cur_state != state,
931
	     "PLL state assertion failure (expected %s, current %s)\n",
941
	     "PLL state assertion failure (expected %s, current %s)\n",
932
	     state_string(state), state_string(cur_state));
942
	     state_string(state), state_string(cur_state));
933
}
943
}
-
 
944
 
-
 
945
/* XXX: the dsi pll is shared between MIPI DSI ports */
-
 
946
static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
-
 
947
{
-
 
948
	u32 val;
-
 
949
	bool cur_state;
-
 
950
 
-
 
951
	mutex_lock(&dev_priv->dpio_lock);
-
 
952
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
-
 
953
	mutex_unlock(&dev_priv->dpio_lock);
-
 
954
 
-
 
955
	cur_state = val & DSI_PLL_VCO_EN;
-
 
956
	WARN(cur_state != state,
-
 
957
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
-
 
958
	     state_string(state), state_string(cur_state));
-
 
959
}
-
 
960
#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
-
 
961
#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
934
 
962
 
935
struct intel_shared_dpll *
963
struct intel_shared_dpll *
936
intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
964
intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
937
{
965
{
938
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
966
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
939
 
967
 
940
	if (crtc->config.shared_dpll < 0)
968
	if (crtc->config.shared_dpll < 0)
941
		return NULL;
969
		return NULL;
942
 
970
 
943
	return &dev_priv->shared_dplls[crtc->config.shared_dpll];
971
	return &dev_priv->shared_dplls[crtc->config.shared_dpll];
944
}
972
}
945
 
973
 
946
/* For ILK+ */
974
/* For ILK+ */
947
void assert_shared_dpll(struct drm_i915_private *dev_priv,
975
void assert_shared_dpll(struct drm_i915_private *dev_priv,
948
			       struct intel_shared_dpll *pll,
976
			       struct intel_shared_dpll *pll,
949
			   bool state)
977
			   bool state)
950
{
978
{
951
	bool cur_state;
979
	bool cur_state;
952
	struct intel_dpll_hw_state hw_state;
980
	struct intel_dpll_hw_state hw_state;
953
 
981
 
954
	if (HAS_PCH_LPT(dev_priv->dev)) {
982
	if (HAS_PCH_LPT(dev_priv->dev)) {
955
		DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
983
		DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
956
		return;
984
		return;
957
	}
985
	}
958
 
986
 
959
	if (WARN (!pll,
987
	if (WARN (!pll,
960
		  "asserting DPLL %s with no DPLL\n", state_string(state)))
988
		  "asserting DPLL %s with no DPLL\n", state_string(state)))
961
		return;
989
		return;
962
 
990
 
963
	cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
991
	cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
964
	WARN(cur_state != state,
992
	WARN(cur_state != state,
965
	     "%s assertion failure (expected %s, current %s)\n",
993
	     "%s assertion failure (expected %s, current %s)\n",
966
	     pll->name, state_string(state), state_string(cur_state));
994
	     pll->name, state_string(state), state_string(cur_state));
967
}
995
}
968
 
996
 
969
/*
 * WARN if the FDI TX enable state for @pipe differs from @state.
 * On DDI hardware there is no dedicated FDI_TX register, so the
 * transcoder DDI function control enable bit is checked instead.
 */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
                          enum pipe pipe, bool state)
{
        int reg;
        u32 val;
        bool cur_state;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);

        if (HAS_DDI(dev_priv->dev)) {
                /* DDI does not have a specific FDI_TX register */
                reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
                val = I915_READ(reg);
                cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
        } else {
                reg = FDI_TX_CTL(pipe);
                val = I915_READ(reg);
                cur_state = !!(val & FDI_TX_ENABLE);
        }
        WARN(cur_state != state,
             "FDI TX state assertion failure (expected %s, current %s)\n",
             state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1021
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
994
 
1022
 
995
/* WARN if the FDI RX enable state for @pipe differs from @state. */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
                          enum pipe pipe, bool state)
{
        int reg;
        u32 val;
        bool cur_state;

        reg = FDI_RX_CTL(pipe);
        val = I915_READ(reg);
        cur_state = !!(val & FDI_RX_ENABLE);
        WARN(cur_state != state,
             "FDI RX state assertion failure (expected %s, current %s)\n",
             state_string(state), state_string(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1038
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1011
 
1039
 
1012
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1040
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1013
				      enum pipe pipe)
1041
				      enum pipe pipe)
1014
{
1042
{
1015
	int reg;
1043
	int reg;
1016
	u32 val;
1044
	u32 val;
1017
 
1045
 
1018
	/* ILK FDI PLL is always enabled */
1046
	/* ILK FDI PLL is always enabled */
1019
	if (dev_priv->info->gen == 5)
1047
	if (dev_priv->info->gen == 5)
1020
		return;
1048
		return;
1021
 
1049
 
1022
	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
1050
	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
1023
	if (HAS_DDI(dev_priv->dev))
1051
	if (HAS_DDI(dev_priv->dev))
1024
		return;
1052
		return;
1025
 
1053
 
1026
	reg = FDI_TX_CTL(pipe);
1054
	reg = FDI_TX_CTL(pipe);
1027
	val = I915_READ(reg);
1055
	val = I915_READ(reg);
1028
	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1056
	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1029
}
1057
}
1030
 
1058
 
1031
/* WARN if the FDI RX PLL enable state for @pipe differs from @state. */
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
                       enum pipe pipe, bool state)
{
        int reg;
        u32 val;
        bool cur_state;

        reg = FDI_RX_CTL(pipe);
        val = I915_READ(reg);
        cur_state = !!(val & FDI_RX_PLL_ENABLE);
        WARN(cur_state != state,
             "FDI RX PLL assertion failure (expected %s, current %s)\n",
             state_string(state), state_string(cur_state));
}
1072
}
1045
 
1073
 
1046
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1074
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1047
				  enum pipe pipe)
1075
				  enum pipe pipe)
1048
{
1076
{
1049
	int pp_reg, lvds_reg;
1077
	int pp_reg, lvds_reg;
1050
	u32 val;
1078
	u32 val;
1051
	enum pipe panel_pipe = PIPE_A;
1079
	enum pipe panel_pipe = PIPE_A;
1052
	bool locked = true;
1080
	bool locked = true;
1053
 
1081
 
1054
	if (HAS_PCH_SPLIT(dev_priv->dev)) {
1082
	if (HAS_PCH_SPLIT(dev_priv->dev)) {
1055
		pp_reg = PCH_PP_CONTROL;
1083
		pp_reg = PCH_PP_CONTROL;
1056
		lvds_reg = PCH_LVDS;
1084
		lvds_reg = PCH_LVDS;
1057
	} else {
1085
	} else {
1058
		pp_reg = PP_CONTROL;
1086
		pp_reg = PP_CONTROL;
1059
		lvds_reg = LVDS;
1087
		lvds_reg = LVDS;
1060
	}
1088
	}
1061
 
1089
 
1062
	val = I915_READ(pp_reg);
1090
	val = I915_READ(pp_reg);
1063
	if (!(val & PANEL_POWER_ON) ||
1091
	if (!(val & PANEL_POWER_ON) ||
1064
	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
1092
	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
1065
		locked = false;
1093
		locked = false;
1066
 
1094
 
1067
	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
1095
	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
1068
		panel_pipe = PIPE_B;
1096
		panel_pipe = PIPE_B;
1069
 
1097
 
1070
	WARN(panel_pipe == pipe && locked,
1098
	WARN(panel_pipe == pipe && locked,
1071
	     "panel assertion failure, pipe %c regs locked\n",
1099
	     "panel assertion failure, pipe %c regs locked\n",
1072
	     pipe_name(pipe));
1100
	     pipe_name(pipe));
1073
}
1101
}
-
 
1102
 
-
 
1103
/*
 * WARN if the hardware cursor enable state on @pipe differs from @state.
 * The cursor control register and enable bit vary by platform generation.
 */
static void assert_cursor(struct drm_i915_private *dev_priv,
                          enum pipe pipe, bool state)
{
        struct drm_device *dev = dev_priv->dev;
        bool cur_state;

        if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
                cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
        else if (IS_845G(dev) || IS_I865G(dev))
                cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
        else
                cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

        WARN(cur_state != state,
             "cursor on pipe %c assertion failure (expected %s, current %s)\n",
             pipe_name(pipe), state_string(state), state_string(cur_state));
}
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1074
 
1122
 
1075
/*
 * WARN if the enable state of @pipe differs from @state.
 * Forces the expected state to "on" when the pipe A quirk is active,
 * and treats a powered-down transcoder domain as "disabled".
 */
void assert_pipe(struct drm_i915_private *dev_priv,
                 enum pipe pipe, bool state)
{
        int reg;
        u32 val;
        bool cur_state;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);

        /* if we need the pipe A quirk it must be always on */
        if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
                state = true;

        if (!intel_display_power_enabled(dev_priv->dev,
                                POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
                /* Power domain off: the pipe cannot be enabled. */
                cur_state = false;
        } else {
                reg = PIPECONF(cpu_transcoder);
                val = I915_READ(reg);
                cur_state = !!(val & PIPECONF_ENABLE);
        }

        WARN(cur_state != state,
             "pipe %c assertion failure (expected %s, current %s)\n",
             pipe_name(pipe), state_string(state), state_string(cur_state));
}
1148
}
1101
 
1149
 
1102
/* WARN if the display plane enable state of @plane differs from @state. */
static void assert_plane(struct drm_i915_private *dev_priv,
                         enum plane plane, bool state)
{
        int reg;
        u32 val;
        bool cur_state;

        reg = DSPCNTR(plane);
        val = I915_READ(reg);
        cur_state = !!(val & DISPLAY_PLANE_ENABLE);
        WARN(cur_state != state,
             "plane %c assertion failure (expected %s, current %s)\n",
             plane_name(plane), state_string(state), state_string(cur_state));
}

#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)
1166
#define assert_plane_disabled(d, p) assert_plane(d, p, false)
1119
 
1167
 
1120
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1168
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1121
				   enum pipe pipe)
1169
				   enum pipe pipe)
1122
{
1170
{
1123
	struct drm_device *dev = dev_priv->dev;
1171
	struct drm_device *dev = dev_priv->dev;
1124
	int reg, i;
1172
	int reg, i;
1125
	u32 val;
1173
	u32 val;
1126
	int cur_pipe;
1174
	int cur_pipe;
1127
 
1175
 
1128
	/* Primary planes are fixed to pipes on gen4+ */
1176
	/* Primary planes are fixed to pipes on gen4+ */
1129
	if (INTEL_INFO(dev)->gen >= 4) {
1177
	if (INTEL_INFO(dev)->gen >= 4) {
1130
		reg = DSPCNTR(pipe);
1178
		reg = DSPCNTR(pipe);
1131
		val = I915_READ(reg);
1179
		val = I915_READ(reg);
1132
		WARN((val & DISPLAY_PLANE_ENABLE),
1180
		WARN((val & DISPLAY_PLANE_ENABLE),
1133
		     "plane %c assertion failure, should be disabled but not\n",
1181
		     "plane %c assertion failure, should be disabled but not\n",
1134
		     plane_name(pipe));
1182
		     plane_name(pipe));
1135
		return;
1183
		return;
1136
	}
1184
	}
1137
 
1185
 
1138
	/* Need to check both planes against the pipe */
1186
	/* Need to check both planes against the pipe */
1139
	for_each_pipe(i) {
1187
	for_each_pipe(i) {
1140
		reg = DSPCNTR(i);
1188
		reg = DSPCNTR(i);
1141
		val = I915_READ(reg);
1189
		val = I915_READ(reg);
1142
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1190
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1143
			DISPPLANE_SEL_PIPE_SHIFT;
1191
			DISPPLANE_SEL_PIPE_SHIFT;
1144
		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1192
		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1145
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
1193
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
1146
		     plane_name(i), pipe_name(pipe));
1194
		     plane_name(i), pipe_name(pipe));
1147
	}
1195
	}
1148
}
1196
}
1149
 
1197
 
1150
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1198
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1151
				    enum pipe pipe)
1199
				    enum pipe pipe)
1152
{
1200
{
1153
	struct drm_device *dev = dev_priv->dev;
1201
	struct drm_device *dev = dev_priv->dev;
1154
	int reg, i;
1202
	int reg, i;
1155
	u32 val;
1203
	u32 val;
1156
 
1204
 
1157
	if (IS_VALLEYVIEW(dev)) {
1205
	if (IS_VALLEYVIEW(dev)) {
1158
	for (i = 0; i < dev_priv->num_plane; i++) {
1206
	for (i = 0; i < dev_priv->num_plane; i++) {
1159
		reg = SPCNTR(pipe, i);
1207
		reg = SPCNTR(pipe, i);
1160
		val = I915_READ(reg);
1208
		val = I915_READ(reg);
1161
		WARN((val & SP_ENABLE),
1209
		WARN((val & SP_ENABLE),
1162
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1210
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1163
			     sprite_name(pipe, i), pipe_name(pipe));
1211
			     sprite_name(pipe, i), pipe_name(pipe));
1164
		}
1212
		}
1165
	} else if (INTEL_INFO(dev)->gen >= 7) {
1213
	} else if (INTEL_INFO(dev)->gen >= 7) {
1166
		reg = SPRCTL(pipe);
1214
		reg = SPRCTL(pipe);
1167
		val = I915_READ(reg);
1215
		val = I915_READ(reg);
1168
		WARN((val & SPRITE_ENABLE),
1216
		WARN((val & SPRITE_ENABLE),
1169
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1217
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1170
		     plane_name(pipe), pipe_name(pipe));
1218
		     plane_name(pipe), pipe_name(pipe));
1171
	} else if (INTEL_INFO(dev)->gen >= 5) {
1219
	} else if (INTEL_INFO(dev)->gen >= 5) {
1172
		reg = DVSCNTR(pipe);
1220
		reg = DVSCNTR(pipe);
1173
		val = I915_READ(reg);
1221
		val = I915_READ(reg);
1174
		WARN((val & DVS_ENABLE),
1222
		WARN((val & DVS_ENABLE),
1175
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1223
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1176
		     plane_name(pipe), pipe_name(pipe));
1224
		     plane_name(pipe), pipe_name(pipe));
1177
	}
1225
	}
1178
}
1226
}
1179
 
1227
 
1180
static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1228
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1181
{
1229
{
1182
	u32 val;
1230
	u32 val;
1183
	bool enabled;
1231
	bool enabled;
1184
 
1232
 
1185
	if (HAS_PCH_LPT(dev_priv->dev)) {
-
 
1186
		DRM_DEBUG_DRIVER("LPT does not has PCH refclk, skipping check\n");
-
 
1187
		return;
-
 
1188
	}
1233
	WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
1189
 
1234
 
1190
	val = I915_READ(PCH_DREF_CONTROL);
1235
	val = I915_READ(PCH_DREF_CONTROL);
1191
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1236
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1192
			    DREF_SUPERSPREAD_SOURCE_MASK));
1237
			    DREF_SUPERSPREAD_SOURCE_MASK));
1193
	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1238
	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1194
}
1239
}
1195
 
1240
 
1196
static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1241
static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1197
				       enum pipe pipe)
1242
				       enum pipe pipe)
1198
{
1243
{
1199
	int reg;
1244
	int reg;
1200
	u32 val;
1245
	u32 val;
1201
	bool enabled;
1246
	bool enabled;
1202
 
1247
 
1203
	reg = PCH_TRANSCONF(pipe);
1248
	reg = PCH_TRANSCONF(pipe);
1204
	val = I915_READ(reg);
1249
	val = I915_READ(reg);
1205
	enabled = !!(val & TRANS_ENABLE);
1250
	enabled = !!(val & TRANS_ENABLE);
1206
	WARN(enabled,
1251
	WARN(enabled,
1207
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
1252
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
1208
	     pipe_name(pipe));
1253
	     pipe_name(pipe));
1209
}
1254
}
1210
 
1255
 
1211
/*
 * Return true when the DP port described by @val is enabled and routed
 * to @pipe.  On CPT the routing lives in TRANS_DP_CTL; elsewhere it is
 * encoded in the port register's pipe-select bits.
 */
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
                            enum pipe pipe, u32 port_sel, u32 val)
{
        if ((val & DP_PORT_EN) == 0)
                return false;

        if (HAS_PCH_CPT(dev_priv->dev)) {
                u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
                u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
                if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
                        return false;
        } else {
                if ((val & DP_PIPE_MASK) != (pipe << 30))
                        return false;
        }
        return true;
}
1272
}
1228
 
1273
 
1229
/*
 * Return true when the HDMI/SDVO port described by @val is enabled and
 * routed to @pipe, using the CPT or pre-CPT pipe-select encoding.
 */
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
                              enum pipe pipe, u32 val)
{
        if ((val & SDVO_ENABLE) == 0)
                return false;

        if (HAS_PCH_CPT(dev_priv->dev)) {
                if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
                        return false;
        } else {
                if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
                        return false;
        }
        return true;
}
1288
}
1244
 
1289
 
1245
/*
 * Return true when the LVDS port described by @val is enabled and
 * routed to @pipe (CPT transcoder-select vs. legacy pipe-select bits).
 */
static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
                              enum pipe pipe, u32 val)
{
        if ((val & LVDS_PORT_EN) == 0)
                return false;

        if (HAS_PCH_CPT(dev_priv->dev)) {
                if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
                        return false;
        } else {
                if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
                        return false;
        }
        return true;
}
1304
}
1260
 
1305
 
1261
/*
 * Return true when the analog (ADPA/VGA) port described by @val is
 * enabled and routed to @pipe.
 */
static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
                              enum pipe pipe, u32 val)
{
        if ((val & ADPA_DAC_ENABLE) == 0)
                return false;
        if (HAS_PCH_CPT(dev_priv->dev)) {
                if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
                        return false;
        } else {
                if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
                        return false;
        }
        return true;
}
1319
}
1275
 
1320
 
1276
/*
 * WARN if the PCH DP port at @reg is still enabled on @pipe, and
 * additionally WARN if an IBX DP port is disabled but left selecting
 * transcoder B (an IBX-specific restriction).
 */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
                                   enum pipe pipe, int reg, u32 port_sel)
{
        u32 val = I915_READ(reg);
        WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
             "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
             reg, pipe_name(pipe));

        WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
             && (val & DP_PIPEB_SELECT),
             "IBX PCH dp port still using transcoder B\n");
}
1332
}
1288
 
1333
 
1289
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1334
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1290
				     enum pipe pipe, int reg)
1335
				     enum pipe pipe, int reg)
1291
{
1336
{
1292
	u32 val = I915_READ(reg);
1337
	u32 val = I915_READ(reg);
1293
	WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1338
	WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1294
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1339
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1295
	     reg, pipe_name(pipe));
1340
	     reg, pipe_name(pipe));
1296
 
1341
 
1297
	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
1342
	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
1298
	     && (val & SDVO_PIPE_B_SELECT),
1343
	     && (val & SDVO_PIPE_B_SELECT),
1299
	     "IBX PCH hdmi port still using transcoder B\n");
1344
	     "IBX PCH hdmi port still using transcoder B\n");
1300
}
1345
}
1301
 
1346
 
1302
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1347
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1303
				      enum pipe pipe)
1348
				      enum pipe pipe)
1304
{
1349
{
1305
	int reg;
1350
	int reg;
1306
	u32 val;
1351
	u32 val;
1307
 
1352
 
1308
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1353
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1309
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1354
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1310
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1355
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1311
 
1356
 
1312
	reg = PCH_ADPA;
1357
	reg = PCH_ADPA;
1313
	val = I915_READ(reg);
1358
	val = I915_READ(reg);
1314
	WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1359
	WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1315
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
1360
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
1316
	     pipe_name(pipe));
1361
	     pipe_name(pipe));
1317
 
1362
 
1318
	reg = PCH_LVDS;
1363
	reg = PCH_LVDS;
1319
	val = I915_READ(reg);
1364
	val = I915_READ(reg);
1320
	WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1365
	WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1321
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
1366
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
1322
	     pipe_name(pipe));
1367
	     pipe_name(pipe));
1323
 
1368
 
1324
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1369
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1325
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1370
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1326
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
1371
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
1327
}
1372
}
-
 
1373
 
-
 
1374
static void intel_init_dpio(struct drm_device *dev)
-
 
1375
{
-
 
1376
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
1377
 
-
 
1378
	if (!IS_VALLEYVIEW(dev))
-
 
1379
		return;
-
 
1380
 
-
 
1381
	DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
-
 
1382
}
-
 
1383
 
-
 
1384
static void intel_reset_dpio(struct drm_device *dev)
-
 
1385
{
-
 
1386
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
1387
 
-
 
1388
	if (!IS_VALLEYVIEW(dev))
-
 
1389
		return;
-
 
1390
 
-
 
1391
	/*
-
 
1392
	 * Enable the CRI clock source so we can get at the display and the
-
 
1393
	 * reference clock for VGA hotplug / manual detection.
-
 
1394
	 */
-
 
1395
	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
-
 
1396
		   DPLL_REFA_CLK_ENABLE_VLV |
-
 
1397
		   DPLL_INTEGRATED_CRI_CLK_VLV);
-
 
1398
 
-
 
1399
	/*
-
 
1400
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
-
 
1401
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
-
 
1402
	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
-
 
1403
	 *   b.	The other bits such as sfr settings / modesel may all be set
-
 
1404
	 *      to 0.
-
 
1405
	 *
-
 
1406
	 * This should only be done on init and resume from S3 with both
-
 
1407
	 * PLLs disabled, or we risk losing DPIO and PLL synchronization.
-
 
1408
	 */
-
 
1409
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
-
 
1410
}
1328
 
1411
 
1329
static void vlv_enable_pll(struct intel_crtc *crtc)
1412
static void vlv_enable_pll(struct intel_crtc *crtc)
1330
{
1413
{
1331
	struct drm_device *dev = crtc->base.dev;
1414
	struct drm_device *dev = crtc->base.dev;
1332
	struct drm_i915_private *dev_priv = dev->dev_private;
1415
	struct drm_i915_private *dev_priv = dev->dev_private;
1333
	int reg = DPLL(crtc->pipe);
1416
	int reg = DPLL(crtc->pipe);
1334
	u32 dpll = crtc->config.dpll_hw_state.dpll;
1417
	u32 dpll = crtc->config.dpll_hw_state.dpll;
1335
 
1418
 
1336
	assert_pipe_disabled(dev_priv, crtc->pipe);
1419
	assert_pipe_disabled(dev_priv, crtc->pipe);
1337
 
1420
 
1338
    /* No really, not for ILK+ */
1421
    /* No really, not for ILK+ */
1339
	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
1422
	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
1340
 
1423
 
1341
    /* PLL is protected by panel, make sure we can write it */
1424
    /* PLL is protected by panel, make sure we can write it */
1342
    if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
1425
    if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
1343
		assert_panel_unlocked(dev_priv, crtc->pipe);
1426
		assert_panel_unlocked(dev_priv, crtc->pipe);
1344
 
1427
 
1345
	I915_WRITE(reg, dpll);
1428
	I915_WRITE(reg, dpll);
1346
	POSTING_READ(reg);
1429
	POSTING_READ(reg);
1347
	udelay(150);
1430
	udelay(150);
1348
 
1431
 
1349
	if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1432
	if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1350
		DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
1433
		DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
1351
 
1434
 
1352
	I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
1435
	I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
1353
	POSTING_READ(DPLL_MD(crtc->pipe));
1436
	POSTING_READ(DPLL_MD(crtc->pipe));
1354
 
1437
 
1355
	/* We do this three times for luck */
1438
	/* We do this three times for luck */
1356
	I915_WRITE(reg, dpll);
1439
	I915_WRITE(reg, dpll);
1357
	POSTING_READ(reg);
1440
	POSTING_READ(reg);
1358
	udelay(150); /* wait for warmup */
1441
	udelay(150); /* wait for warmup */
1359
	I915_WRITE(reg, dpll);
1442
	I915_WRITE(reg, dpll);
1360
	POSTING_READ(reg);
1443
	POSTING_READ(reg);
1361
	udelay(150); /* wait for warmup */
1444
	udelay(150); /* wait for warmup */
1362
	I915_WRITE(reg, dpll);
1445
	I915_WRITE(reg, dpll);
1363
	POSTING_READ(reg);
1446
	POSTING_READ(reg);
1364
	udelay(150); /* wait for warmup */
1447
	udelay(150); /* wait for warmup */
1365
}
1448
}
1366
 
1449
 
1367
static void i9xx_enable_pll(struct intel_crtc *crtc)
1450
static void i9xx_enable_pll(struct intel_crtc *crtc)
1368
{
1451
{
1369
	struct drm_device *dev = crtc->base.dev;
1452
	struct drm_device *dev = crtc->base.dev;
1370
	struct drm_i915_private *dev_priv = dev->dev_private;
1453
	struct drm_i915_private *dev_priv = dev->dev_private;
1371
	int reg = DPLL(crtc->pipe);
1454
	int reg = DPLL(crtc->pipe);
1372
	u32 dpll = crtc->config.dpll_hw_state.dpll;
1455
	u32 dpll = crtc->config.dpll_hw_state.dpll;
1373
 
1456
 
1374
	assert_pipe_disabled(dev_priv, crtc->pipe);
1457
	assert_pipe_disabled(dev_priv, crtc->pipe);
1375
 
1458
 
1376
	/* No really, not for ILK+ */
1459
	/* No really, not for ILK+ */
1377
	BUG_ON(dev_priv->info->gen >= 5);
1460
	BUG_ON(dev_priv->info->gen >= 5);
1378
 
1461
 
1379
	/* PLL is protected by panel, make sure we can write it */
1462
	/* PLL is protected by panel, make sure we can write it */
1380
	if (IS_MOBILE(dev) && !IS_I830(dev))
1463
	if (IS_MOBILE(dev) && !IS_I830(dev))
1381
		assert_panel_unlocked(dev_priv, crtc->pipe);
1464
		assert_panel_unlocked(dev_priv, crtc->pipe);
1382
 
1465
 
1383
	I915_WRITE(reg, dpll);
1466
	I915_WRITE(reg, dpll);
1384
 
1467
 
1385
	/* Wait for the clocks to stabilize. */
1468
	/* Wait for the clocks to stabilize. */
1386
	POSTING_READ(reg);
1469
	POSTING_READ(reg);
1387
	udelay(150);
1470
	udelay(150);
1388
 
1471
 
1389
	if (INTEL_INFO(dev)->gen >= 4) {
1472
	if (INTEL_INFO(dev)->gen >= 4) {
1390
		I915_WRITE(DPLL_MD(crtc->pipe),
1473
		I915_WRITE(DPLL_MD(crtc->pipe),
1391
			   crtc->config.dpll_hw_state.dpll_md);
1474
			   crtc->config.dpll_hw_state.dpll_md);
1392
	} else {
1475
	} else {
1393
		/* The pixel multiplier can only be updated once the
1476
		/* The pixel multiplier can only be updated once the
1394
		 * DPLL is enabled and the clocks are stable.
1477
		 * DPLL is enabled and the clocks are stable.
1395
		 *
1478
		 *
1396
		 * So write it again.
1479
		 * So write it again.
1397
		 */
1480
		 */
1398
		I915_WRITE(reg, dpll);
1481
		I915_WRITE(reg, dpll);
1399
	}
1482
	}
1400
 
1483
 
1401
    /* We do this three times for luck */
1484
    /* We do this three times for luck */
1402
	I915_WRITE(reg, dpll);
1485
	I915_WRITE(reg, dpll);
1403
    POSTING_READ(reg);
1486
    POSTING_READ(reg);
1404
    udelay(150); /* wait for warmup */
1487
    udelay(150); /* wait for warmup */
1405
	I915_WRITE(reg, dpll);
1488
	I915_WRITE(reg, dpll);
1406
    POSTING_READ(reg);
1489
    POSTING_READ(reg);
1407
    udelay(150); /* wait for warmup */
1490
    udelay(150); /* wait for warmup */
1408
	I915_WRITE(reg, dpll);
1491
	I915_WRITE(reg, dpll);
1409
    POSTING_READ(reg);
1492
    POSTING_READ(reg);
1410
    udelay(150); /* wait for warmup */
1493
    udelay(150); /* wait for warmup */
1411
}
1494
}
1412
 
1495
 
1413
/**
1496
/**
1414
 * i9xx_disable_pll - disable a PLL
1497
 * i9xx_disable_pll - disable a PLL
1415
 * @dev_priv: i915 private structure
1498
 * @dev_priv: i915 private structure
1416
 * @pipe: pipe PLL to disable
1499
 * @pipe: pipe PLL to disable
1417
 *
1500
 *
1418
 * Disable the PLL for @pipe, making sure the pipe is off first.
1501
 * Disable the PLL for @pipe, making sure the pipe is off first.
1419
 *
1502
 *
1420
 * Note!  This is for pre-ILK only.
1503
 * Note!  This is for pre-ILK only.
1421
 */
1504
 */
1422
static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1505
static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1423
{
1506
{
1424
	/* Don't disable pipe A or pipe A PLLs if needed */
1507
	/* Don't disable pipe A or pipe A PLLs if needed */
1425
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1508
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1426
		return;
1509
		return;
1427
 
1510
 
1428
	/* Make sure the pipe isn't still relying on us */
1511
	/* Make sure the pipe isn't still relying on us */
1429
	assert_pipe_disabled(dev_priv, pipe);
1512
	assert_pipe_disabled(dev_priv, pipe);
1430
 
1513
 
1431
	I915_WRITE(DPLL(pipe), 0);
1514
	I915_WRITE(DPLL(pipe), 0);
1432
	POSTING_READ(DPLL(pipe));
1515
	POSTING_READ(DPLL(pipe));
1433
}
1516
}
1434
 
1517
 
1435
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1518
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1436
{
1519
{
1437
	u32 val = 0;
1520
	u32 val = 0;
1438
 
1521
 
1439
	/* Make sure the pipe isn't still relying on us */
1522
	/* Make sure the pipe isn't still relying on us */
1440
	assert_pipe_disabled(dev_priv, pipe);
1523
	assert_pipe_disabled(dev_priv, pipe);
-
 
1524
 
1441
 
1525
	/*
-
 
1526
	 * Leave integrated clock source and reference clock enabled for pipe B.
-
 
1527
	 * The latter is needed for VGA hotplug / manual detection.
1442
	/* Leave integrated clock source enabled */
1528
	 */
1443
	if (pipe == PIPE_B)
1529
	if (pipe == PIPE_B)
1444
		val = DPLL_INTEGRATED_CRI_CLK_VLV;
1530
		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
1445
	I915_WRITE(DPLL(pipe), val);
1531
	I915_WRITE(DPLL(pipe), val);
1446
	POSTING_READ(DPLL(pipe));
1532
	POSTING_READ(DPLL(pipe));
1447
}
1533
}
1448
 
1534
 
-
 
1535
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1449
void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
1536
		struct intel_digital_port *dport)
1450
{
1537
{
1451
	u32 port_mask;
1538
	u32 port_mask;
1452
 
1539
 
-
 
1540
	switch (dport->port) {
1453
	if (!port)
1541
	case PORT_B:
1454
		port_mask = DPLL_PORTB_READY_MASK;
1542
		port_mask = DPLL_PORTB_READY_MASK;
-
 
1543
		break;
1455
	else
1544
	case PORT_C:
-
 
1545
		port_mask = DPLL_PORTC_READY_MASK;
-
 
1546
		break;
-
 
1547
	default:
-
 
1548
		BUG();
1456
		port_mask = DPLL_PORTC_READY_MASK;
1549
	}
1457
 
1550
 
1458
	if (wait_for((I915_READ(DPLL(0)) & port_mask) == 0, 1000))
1551
	if (wait_for((I915_READ(DPLL(0)) & port_mask) == 0, 1000))
1459
		WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
1552
		WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
1460
		     'B' + port, I915_READ(DPLL(0)));
1553
		     port_name(dport->port), I915_READ(DPLL(0)));
1461
}
1554
}
1462
 
1555
 
1463
/**
1556
/**
1464
 * ironlake_enable_shared_dpll - enable PCH PLL
1557
 * ironlake_enable_shared_dpll - enable PCH PLL
1465
 * @dev_priv: i915 private structure
1558
 * @dev_priv: i915 private structure
1466
 * @pipe: pipe PLL to enable
1559
 * @pipe: pipe PLL to enable
1467
 *
1560
 *
1468
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
1561
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
1469
 * drives the transcoder clock.
1562
 * drives the transcoder clock.
1470
 */
1563
 */
1471
static void ironlake_enable_shared_dpll(struct intel_crtc *crtc)
1564
static void ironlake_enable_shared_dpll(struct intel_crtc *crtc)
1472
{
1565
{
1473
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1566
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1474
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1567
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1475
 
1568
 
1476
	/* PCH PLLs only available on ILK, SNB and IVB */
1569
	/* PCH PLLs only available on ILK, SNB and IVB */
1477
	BUG_ON(dev_priv->info->gen < 5);
1570
	BUG_ON(dev_priv->info->gen < 5);
1478
	if (WARN_ON(pll == NULL))
1571
	if (WARN_ON(pll == NULL))
1479
		return;
1572
		return;
1480
 
1573
 
1481
	if (WARN_ON(pll->refcount == 0))
1574
	if (WARN_ON(pll->refcount == 0))
1482
		return;
1575
		return;
1483
 
1576
 
1484
	DRM_DEBUG_KMS("enable %s (active %d, on? %d)for crtc %d\n",
1577
	DRM_DEBUG_KMS("enable %s (active %d, on? %d)for crtc %d\n",
1485
		      pll->name, pll->active, pll->on,
1578
		      pll->name, pll->active, pll->on,
1486
		      crtc->base.base.id);
1579
		      crtc->base.base.id);
1487
 
1580
 
1488
	if (pll->active++) {
1581
	if (pll->active++) {
1489
		WARN_ON(!pll->on);
1582
		WARN_ON(!pll->on);
1490
		assert_shared_dpll_enabled(dev_priv, pll);
1583
		assert_shared_dpll_enabled(dev_priv, pll);
1491
		return;
1584
		return;
1492
	}
1585
	}
1493
	WARN_ON(pll->on);
1586
	WARN_ON(pll->on);
1494
 
1587
 
1495
	DRM_DEBUG_KMS("enabling %s\n", pll->name);
1588
	DRM_DEBUG_KMS("enabling %s\n", pll->name);
1496
	pll->enable(dev_priv, pll);
1589
	pll->enable(dev_priv, pll);
1497
	pll->on = true;
1590
	pll->on = true;
1498
}
1591
}
1499
 
1592
 
1500
static void intel_disable_shared_dpll(struct intel_crtc *crtc)
1593
static void intel_disable_shared_dpll(struct intel_crtc *crtc)
1501
{
1594
{
1502
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1595
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1503
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1596
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1504
 
1597
 
1505
	/* PCH only available on ILK+ */
1598
	/* PCH only available on ILK+ */
1506
	BUG_ON(dev_priv->info->gen < 5);
1599
	BUG_ON(dev_priv->info->gen < 5);
1507
	if (WARN_ON(pll == NULL))
1600
	if (WARN_ON(pll == NULL))
1508
	       return;
1601
	       return;
1509
 
1602
 
1510
	if (WARN_ON(pll->refcount == 0))
1603
	if (WARN_ON(pll->refcount == 0))
1511
		return;
1604
		return;
1512
 
1605
 
1513
	DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
1606
	DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
1514
		      pll->name, pll->active, pll->on,
1607
		      pll->name, pll->active, pll->on,
1515
		      crtc->base.base.id);
1608
		      crtc->base.base.id);
1516
 
1609
 
1517
	if (WARN_ON(pll->active == 0)) {
1610
	if (WARN_ON(pll->active == 0)) {
1518
		assert_shared_dpll_disabled(dev_priv, pll);
1611
		assert_shared_dpll_disabled(dev_priv, pll);
1519
		return;
1612
		return;
1520
	}
1613
	}
1521
 
1614
 
1522
	assert_shared_dpll_enabled(dev_priv, pll);
1615
	assert_shared_dpll_enabled(dev_priv, pll);
1523
	WARN_ON(!pll->on);
1616
	WARN_ON(!pll->on);
1524
	if (--pll->active)
1617
	if (--pll->active)
1525
		return;
1618
		return;
1526
 
1619
 
1527
	DRM_DEBUG_KMS("disabling %s\n", pll->name);
1620
	DRM_DEBUG_KMS("disabling %s\n", pll->name);
1528
	pll->disable(dev_priv, pll);
1621
	pll->disable(dev_priv, pll);
1529
	pll->on = false;
1622
	pll->on = false;
1530
}
1623
}
1531
 
1624
 
1532
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1625
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1533
				    enum pipe pipe)
1626
				    enum pipe pipe)
1534
{
1627
{
1535
	struct drm_device *dev = dev_priv->dev;
1628
	struct drm_device *dev = dev_priv->dev;
1536
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1629
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1537
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1630
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1538
	uint32_t reg, val, pipeconf_val;
1631
	uint32_t reg, val, pipeconf_val;
1539
 
1632
 
1540
	/* PCH only available on ILK+ */
1633
	/* PCH only available on ILK+ */
1541
	BUG_ON(dev_priv->info->gen < 5);
1634
	BUG_ON(dev_priv->info->gen < 5);
1542
 
1635
 
1543
	/* Make sure PCH DPLL is enabled */
1636
	/* Make sure PCH DPLL is enabled */
1544
	assert_shared_dpll_enabled(dev_priv,
1637
	assert_shared_dpll_enabled(dev_priv,
1545
				   intel_crtc_to_shared_dpll(intel_crtc));
1638
				   intel_crtc_to_shared_dpll(intel_crtc));
1546
 
1639
 
1547
	/* FDI must be feeding us bits for PCH ports */
1640
	/* FDI must be feeding us bits for PCH ports */
1548
	assert_fdi_tx_enabled(dev_priv, pipe);
1641
	assert_fdi_tx_enabled(dev_priv, pipe);
1549
	assert_fdi_rx_enabled(dev_priv, pipe);
1642
	assert_fdi_rx_enabled(dev_priv, pipe);
1550
 
1643
 
1551
	if (HAS_PCH_CPT(dev)) {
1644
	if (HAS_PCH_CPT(dev)) {
1552
		/* Workaround: Set the timing override bit before enabling the
1645
		/* Workaround: Set the timing override bit before enabling the
1553
		 * pch transcoder. */
1646
		 * pch transcoder. */
1554
		reg = TRANS_CHICKEN2(pipe);
1647
		reg = TRANS_CHICKEN2(pipe);
1555
		val = I915_READ(reg);
1648
		val = I915_READ(reg);
1556
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1649
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1557
		I915_WRITE(reg, val);
1650
		I915_WRITE(reg, val);
1558
	}
1651
	}
1559
 
1652
 
1560
	reg = PCH_TRANSCONF(pipe);
1653
	reg = PCH_TRANSCONF(pipe);
1561
	val = I915_READ(reg);
1654
	val = I915_READ(reg);
1562
	pipeconf_val = I915_READ(PIPECONF(pipe));
1655
	pipeconf_val = I915_READ(PIPECONF(pipe));
1563
 
1656
 
1564
	if (HAS_PCH_IBX(dev_priv->dev)) {
1657
	if (HAS_PCH_IBX(dev_priv->dev)) {
1565
		/*
1658
		/*
1566
		 * make the BPC in transcoder be consistent with
1659
		 * make the BPC in transcoder be consistent with
1567
		 * that in pipeconf reg.
1660
		 * that in pipeconf reg.
1568
		 */
1661
		 */
1569
		val &= ~PIPECONF_BPC_MASK;
1662
		val &= ~PIPECONF_BPC_MASK;
1570
		val |= pipeconf_val & PIPECONF_BPC_MASK;
1663
		val |= pipeconf_val & PIPECONF_BPC_MASK;
1571
	}
1664
	}
1572
 
1665
 
1573
	val &= ~TRANS_INTERLACE_MASK;
1666
	val &= ~TRANS_INTERLACE_MASK;
1574
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1667
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1575
		if (HAS_PCH_IBX(dev_priv->dev) &&
1668
		if (HAS_PCH_IBX(dev_priv->dev) &&
1576
		    intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
1669
		    intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
1577
			val |= TRANS_LEGACY_INTERLACED_ILK;
1670
			val |= TRANS_LEGACY_INTERLACED_ILK;
1578
		else
1671
		else
1579
			val |= TRANS_INTERLACED;
1672
			val |= TRANS_INTERLACED;
1580
	else
1673
	else
1581
		val |= TRANS_PROGRESSIVE;
1674
		val |= TRANS_PROGRESSIVE;
1582
 
1675
 
1583
	I915_WRITE(reg, val | TRANS_ENABLE);
1676
	I915_WRITE(reg, val | TRANS_ENABLE);
1584
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1677
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1585
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1678
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1586
}
1679
}
1587
 
1680
 
1588
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1681
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1589
				      enum transcoder cpu_transcoder)
1682
				      enum transcoder cpu_transcoder)
1590
{
1683
{
1591
	u32 val, pipeconf_val;
1684
	u32 val, pipeconf_val;
1592
 
1685
 
1593
	/* PCH only available on ILK+ */
1686
	/* PCH only available on ILK+ */
1594
	BUG_ON(dev_priv->info->gen < 5);
1687
	BUG_ON(dev_priv->info->gen < 5);
1595
 
1688
 
1596
	/* FDI must be feeding us bits for PCH ports */
1689
	/* FDI must be feeding us bits for PCH ports */
1597
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
1690
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
1598
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
1691
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
1599
 
1692
 
1600
	/* Workaround: set timing override bit. */
1693
	/* Workaround: set timing override bit. */
1601
	val = I915_READ(_TRANSA_CHICKEN2);
1694
	val = I915_READ(_TRANSA_CHICKEN2);
1602
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1695
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1603
	I915_WRITE(_TRANSA_CHICKEN2, val);
1696
	I915_WRITE(_TRANSA_CHICKEN2, val);
1604
 
1697
 
1605
	val = TRANS_ENABLE;
1698
	val = TRANS_ENABLE;
1606
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1699
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1607
 
1700
 
1608
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1701
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1609
	    PIPECONF_INTERLACED_ILK)
1702
	    PIPECONF_INTERLACED_ILK)
1610
		val |= TRANS_INTERLACED;
1703
		val |= TRANS_INTERLACED;
1611
	else
1704
	else
1612
		val |= TRANS_PROGRESSIVE;
1705
		val |= TRANS_PROGRESSIVE;
1613
 
1706
 
1614
	I915_WRITE(LPT_TRANSCONF, val);
1707
	I915_WRITE(LPT_TRANSCONF, val);
1615
	if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
1708
	if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
1616
		DRM_ERROR("Failed to enable PCH transcoder\n");
1709
		DRM_ERROR("Failed to enable PCH transcoder\n");
1617
}
1710
}
1618
 
1711
 
1619
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1712
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1620
				     enum pipe pipe)
1713
				     enum pipe pipe)
1621
{
1714
{
1622
	struct drm_device *dev = dev_priv->dev;
1715
	struct drm_device *dev = dev_priv->dev;
1623
	uint32_t reg, val;
1716
	uint32_t reg, val;
1624
 
1717
 
1625
	/* FDI relies on the transcoder */
1718
	/* FDI relies on the transcoder */
1626
	assert_fdi_tx_disabled(dev_priv, pipe);
1719
	assert_fdi_tx_disabled(dev_priv, pipe);
1627
	assert_fdi_rx_disabled(dev_priv, pipe);
1720
	assert_fdi_rx_disabled(dev_priv, pipe);
1628
 
1721
 
1629
	/* Ports must be off as well */
1722
	/* Ports must be off as well */
1630
	assert_pch_ports_disabled(dev_priv, pipe);
1723
	assert_pch_ports_disabled(dev_priv, pipe);
1631
 
1724
 
1632
	reg = PCH_TRANSCONF(pipe);
1725
	reg = PCH_TRANSCONF(pipe);
1633
	val = I915_READ(reg);
1726
	val = I915_READ(reg);
1634
	val &= ~TRANS_ENABLE;
1727
	val &= ~TRANS_ENABLE;
1635
	I915_WRITE(reg, val);
1728
	I915_WRITE(reg, val);
1636
	/* wait for PCH transcoder off, transcoder state */
1729
	/* wait for PCH transcoder off, transcoder state */
1637
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1730
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1638
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
1731
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
1639
 
1732
 
1640
	if (!HAS_PCH_IBX(dev)) {
1733
	if (!HAS_PCH_IBX(dev)) {
1641
		/* Workaround: Clear the timing override chicken bit again. */
1734
		/* Workaround: Clear the timing override chicken bit again. */
1642
		reg = TRANS_CHICKEN2(pipe);
1735
		reg = TRANS_CHICKEN2(pipe);
1643
		val = I915_READ(reg);
1736
		val = I915_READ(reg);
1644
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1737
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1645
		I915_WRITE(reg, val);
1738
		I915_WRITE(reg, val);
1646
	}
1739
	}
1647
}
1740
}
1648
 
1741
 
1649
static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1742
static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1650
{
1743
{
1651
	u32 val;
1744
	u32 val;
1652
 
1745
 
1653
	val = I915_READ(LPT_TRANSCONF);
1746
	val = I915_READ(LPT_TRANSCONF);
1654
	val &= ~TRANS_ENABLE;
1747
	val &= ~TRANS_ENABLE;
1655
	I915_WRITE(LPT_TRANSCONF, val);
1748
	I915_WRITE(LPT_TRANSCONF, val);
1656
	/* wait for PCH transcoder off, transcoder state */
1749
	/* wait for PCH transcoder off, transcoder state */
1657
	if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
1750
	if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
1658
		DRM_ERROR("Failed to disable PCH transcoder\n");
1751
		DRM_ERROR("Failed to disable PCH transcoder\n");
1659
 
1752
 
1660
	/* Workaround: clear timing override bit. */
1753
	/* Workaround: clear timing override bit. */
1661
	val = I915_READ(_TRANSA_CHICKEN2);
1754
	val = I915_READ(_TRANSA_CHICKEN2);
1662
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1755
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1663
	I915_WRITE(_TRANSA_CHICKEN2, val);
1756
	I915_WRITE(_TRANSA_CHICKEN2, val);
1664
}
1757
}
1665
 
1758
 
1666
/**
1759
/**
1667
 * intel_enable_pipe - enable a pipe, asserting requirements
1760
 * intel_enable_pipe - enable a pipe, asserting requirements
1668
 * @dev_priv: i915 private structure
1761
 * @dev_priv: i915 private structure
1669
 * @pipe: pipe to enable
1762
 * @pipe: pipe to enable
1670
 * @pch_port: on ILK+, is this pipe driving a PCH port or not
1763
 * @pch_port: on ILK+, is this pipe driving a PCH port or not
1671
 *
1764
 *
1672
 * Enable @pipe, making sure that various hardware specific requirements
1765
 * Enable @pipe, making sure that various hardware specific requirements
1673
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1766
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1674
 *
1767
 *
1675
 * @pipe should be %PIPE_A or %PIPE_B.
1768
 * @pipe should be %PIPE_A or %PIPE_B.
1676
 *
1769
 *
1677
 * Will wait until the pipe is actually running (i.e. first vblank) before
1770
 * Will wait until the pipe is actually running (i.e. first vblank) before
1678
 * returning.
1771
 * returning.
1679
 */
1772
 */
1680
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1773
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1681
			      bool pch_port)
1774
			      bool pch_port, bool dsi)
1682
{
1775
{
1683
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1776
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1684
								      pipe);
1777
								      pipe);
1685
	enum pipe pch_transcoder;
1778
	enum pipe pch_transcoder;
1686
	int reg;
1779
	int reg;
1687
	u32 val;
1780
	u32 val;
1688
 
1781
 
1689
	assert_planes_disabled(dev_priv, pipe);
1782
	assert_planes_disabled(dev_priv, pipe);
-
 
1783
	assert_cursor_disabled(dev_priv, pipe);
1690
	assert_sprites_disabled(dev_priv, pipe);
1784
	assert_sprites_disabled(dev_priv, pipe);
1691
 
1785
 
1692
	if (HAS_PCH_LPT(dev_priv->dev))
1786
	if (HAS_PCH_LPT(dev_priv->dev))
1693
		pch_transcoder = TRANSCODER_A;
1787
		pch_transcoder = TRANSCODER_A;
1694
	else
1788
	else
1695
		pch_transcoder = pipe;
1789
		pch_transcoder = pipe;
1696
 
1790
 
1697
	/*
1791
	/*
1698
	 * A pipe without a PLL won't actually be able to drive bits from
1792
	 * A pipe without a PLL won't actually be able to drive bits from
1699
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1793
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1700
	 * need the check.
1794
	 * need the check.
1701
	 */
1795
	 */
1702
	if (!HAS_PCH_SPLIT(dev_priv->dev))
1796
	if (!HAS_PCH_SPLIT(dev_priv->dev))
-
 
1797
		if (dsi)
-
 
1798
			assert_dsi_pll_enabled(dev_priv);
-
 
1799
		else
1703
		assert_pll_enabled(dev_priv, pipe);
1800
		assert_pll_enabled(dev_priv, pipe);
1704
	else {
1801
	else {
1705
		if (pch_port) {
1802
		if (pch_port) {
1706
			/* if driving the PCH, we need FDI enabled */
1803
			/* if driving the PCH, we need FDI enabled */
1707
			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
1804
			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
1708
			assert_fdi_tx_pll_enabled(dev_priv,
1805
			assert_fdi_tx_pll_enabled(dev_priv,
1709
						  (enum pipe) cpu_transcoder);
1806
						  (enum pipe) cpu_transcoder);
1710
		}
1807
		}
1711
		/* FIXME: assert CPU port conditions for SNB+ */
1808
		/* FIXME: assert CPU port conditions for SNB+ */
1712
	}
1809
	}
1713
 
1810
 
1714
	reg = PIPECONF(cpu_transcoder);
1811
	reg = PIPECONF(cpu_transcoder);
1715
	val = I915_READ(reg);
1812
	val = I915_READ(reg);
1716
	if (val & PIPECONF_ENABLE)
1813
	if (val & PIPECONF_ENABLE)
1717
		return;
1814
		return;
1718
 
1815
 
1719
	I915_WRITE(reg, val | PIPECONF_ENABLE);
1816
	I915_WRITE(reg, val | PIPECONF_ENABLE);
1720
	intel_wait_for_vblank(dev_priv->dev, pipe);
1817
	intel_wait_for_vblank(dev_priv->dev, pipe);
1721
}
1818
}
1722
 
1819
 
1723
/**
1820
/**
1724
 * intel_disable_pipe - disable a pipe, asserting requirements
1821
 * intel_disable_pipe - disable a pipe, asserting requirements
1725
 * @dev_priv: i915 private structure
1822
 * @dev_priv: i915 private structure
1726
 * @pipe: pipe to disable
1823
 * @pipe: pipe to disable
1727
 *
1824
 *
1728
 * Disable @pipe, making sure that various hardware specific requirements
1825
 * Disable @pipe, making sure that various hardware specific requirements
1729
 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1826
 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1730
 *
1827
 *
1731
 * @pipe should be %PIPE_A or %PIPE_B.
1828
 * @pipe should be %PIPE_A or %PIPE_B.
1732
 *
1829
 *
1733
 * Will wait until the pipe has shut down before returning.
1830
 * Will wait until the pipe has shut down before returning.
1734
 */
1831
 */
1735
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1832
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1736
			       enum pipe pipe)
1833
			       enum pipe pipe)
1737
{
1834
{
1738
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1835
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1739
								      pipe);
1836
								      pipe);
1740
	int reg;
1837
	int reg;
1741
	u32 val;
1838
	u32 val;
1742
 
1839
 
1743
    /*
1840
    /*
1744
	 * Make sure planes won't keep trying to pump pixels to us,
1841
	 * Make sure planes won't keep trying to pump pixels to us,
1745
	 * or we might hang the display.
1842
	 * or we might hang the display.
1746
	 */
1843
	 */
1747
	assert_planes_disabled(dev_priv, pipe);
1844
	assert_planes_disabled(dev_priv, pipe);
-
 
1845
	assert_cursor_disabled(dev_priv, pipe);
1748
	assert_sprites_disabled(dev_priv, pipe);
1846
	assert_sprites_disabled(dev_priv, pipe);
1749
 
1847
 
1750
	/* Don't disable pipe A or pipe A PLLs if needed */
1848
	/* Don't disable pipe A or pipe A PLLs if needed */
1751
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1849
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1752
		return;
1850
		return;
1753
 
1851
 
1754
	reg = PIPECONF(cpu_transcoder);
1852
	reg = PIPECONF(cpu_transcoder);
1755
	val = I915_READ(reg);
1853
	val = I915_READ(reg);
1756
	if ((val & PIPECONF_ENABLE) == 0)
1854
	if ((val & PIPECONF_ENABLE) == 0)
1757
		return;
1855
		return;
1758
 
1856
 
1759
	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
1857
	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
1760
	intel_wait_for_pipe_off(dev_priv->dev, pipe);
1858
	intel_wait_for_pipe_off(dev_priv->dev, pipe);
1761
}
1859
}
1762
 
1860
 
1763
/*
1861
/*
1764
 * Plane regs are double buffered, going from enabled->disabled needs a
1862
 * Plane regs are double buffered, going from enabled->disabled needs a
1765
 * trigger in order to latch.  The display address reg provides this.
1863
 * trigger in order to latch.  The display address reg provides this.
1766
 */
1864
 */
1767
void intel_flush_display_plane(struct drm_i915_private *dev_priv,
1865
void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
1768
				      enum plane plane)
1866
				      enum plane plane)
1769
{
1867
{
1770
	if (dev_priv->info->gen >= 4)
-
 
1771
		I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1868
	u32 reg = dev_priv->info->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
1772
	else
1869
 
1773
	I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1870
	I915_WRITE(reg, I915_READ(reg));
-
 
1871
	POSTING_READ(reg);
1774
}
1872
}
1775
 
1873
 
1776
/**
1874
/**
1777
 * intel_enable_plane - enable a display plane on a given pipe
1875
 * intel_enable_primary_plane - enable the primary plane on a given pipe
1778
 * @dev_priv: i915 private structure
1876
 * @dev_priv: i915 private structure
1779
 * @plane: plane to enable
1877
 * @plane: plane to enable
1780
 * @pipe: pipe being fed
1878
 * @pipe: pipe being fed
1781
 *
1879
 *
1782
 * Enable @plane on @pipe, making sure that @pipe is running first.
1880
 * Enable @plane on @pipe, making sure that @pipe is running first.
1783
 */
1881
 */
1784
static void intel_enable_plane(struct drm_i915_private *dev_priv,
1882
static void intel_enable_primary_plane(struct drm_i915_private *dev_priv,
1785
			       enum plane plane, enum pipe pipe)
1883
			       enum plane plane, enum pipe pipe)
1786
{
1884
{
-
 
1885
	struct intel_crtc *intel_crtc =
-
 
1886
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
1787
	int reg;
1887
	int reg;
1788
	u32 val;
1888
	u32 val;
1789
 
1889
 
1790
	/* If the pipe isn't enabled, we can't pump pixels and may hang */
1890
	/* If the pipe isn't enabled, we can't pump pixels and may hang */
1791
	assert_pipe_enabled(dev_priv, pipe);
1891
	assert_pipe_enabled(dev_priv, pipe);
-
 
1892
 
-
 
1893
	WARN(intel_crtc->primary_enabled, "Primary plane already enabled\n");
-
 
1894
 
-
 
1895
	intel_crtc->primary_enabled = true;
1792
 
1896
 
1793
	reg = DSPCNTR(plane);
1897
	reg = DSPCNTR(plane);
1794
	val = I915_READ(reg);
1898
	val = I915_READ(reg);
1795
	if (val & DISPLAY_PLANE_ENABLE)
1899
	if (val & DISPLAY_PLANE_ENABLE)
1796
		return;
1900
		return;
1797
 
1901
 
1798
	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
1902
	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
1799
	intel_flush_display_plane(dev_priv, plane);
1903
	intel_flush_primary_plane(dev_priv, plane);
1800
	intel_wait_for_vblank(dev_priv->dev, pipe);
1904
	intel_wait_for_vblank(dev_priv->dev, pipe);
1801
}
1905
}
1802
 
1906
 
1803
/**
1907
/**
1804
 * intel_disable_plane - disable a display plane
1908
 * intel_disable_primary_plane - disable the primary plane
1805
 * @dev_priv: i915 private structure
1909
 * @dev_priv: i915 private structure
1806
 * @plane: plane to disable
1910
 * @plane: plane to disable
1807
 * @pipe: pipe consuming the data
1911
 * @pipe: pipe consuming the data
1808
 *
1912
 *
1809
 * Disable @plane; should be an independent operation.
1913
 * Disable @plane; should be an independent operation.
1810
 */
1914
 */
1811
static void intel_disable_plane(struct drm_i915_private *dev_priv,
1915
static void intel_disable_primary_plane(struct drm_i915_private *dev_priv,
1812
				enum plane plane, enum pipe pipe)
1916
				enum plane plane, enum pipe pipe)
1813
{
1917
{
-
 
1918
	struct intel_crtc *intel_crtc =
-
 
1919
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
1814
	int reg;
1920
	int reg;
1815
	u32 val;
1921
	u32 val;
-
 
1922
 
-
 
1923
	WARN(!intel_crtc->primary_enabled, "Primary plane already disabled\n");
-
 
1924
 
-
 
1925
	intel_crtc->primary_enabled = false;
1816
 
1926
 
1817
	reg = DSPCNTR(plane);
1927
	reg = DSPCNTR(plane);
1818
	val = I915_READ(reg);
1928
	val = I915_READ(reg);
1819
	if ((val & DISPLAY_PLANE_ENABLE) == 0)
1929
	if ((val & DISPLAY_PLANE_ENABLE) == 0)
1820
		return;
1930
		return;
1821
 
1931
 
1822
	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
1932
	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
1823
	intel_flush_display_plane(dev_priv, plane);
1933
	intel_flush_primary_plane(dev_priv, plane);
1824
    intel_wait_for_vblank(dev_priv->dev, pipe);
1934
    intel_wait_for_vblank(dev_priv->dev, pipe);
1825
}
1935
}
1826
 
1936
 
1827
static bool need_vtd_wa(struct drm_device *dev)
1937
static bool need_vtd_wa(struct drm_device *dev)
1828
{
1938
{
1829
#ifdef CONFIG_INTEL_IOMMU
1939
#ifdef CONFIG_INTEL_IOMMU
1830
	if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
1940
	if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
1831
		return true;
1941
		return true;
1832
#endif
1942
#endif
1833
	return false;
1943
	return false;
1834
}
1944
}
1835
 
1945
 
1836
int
1946
int
1837
intel_pin_and_fence_fb_obj(struct drm_device *dev,
1947
intel_pin_and_fence_fb_obj(struct drm_device *dev,
1838
			   struct drm_i915_gem_object *obj,
1948
			   struct drm_i915_gem_object *obj,
1839
			   struct intel_ring_buffer *pipelined)
1949
			   struct intel_ring_buffer *pipelined)
1840
{
1950
{
1841
	struct drm_i915_private *dev_priv = dev->dev_private;
1951
	struct drm_i915_private *dev_priv = dev->dev_private;
1842
	u32 alignment;
1952
	u32 alignment;
1843
	int ret;
1953
	int ret;
1844
 
1954
 
1845
	switch (obj->tiling_mode) {
1955
	switch (obj->tiling_mode) {
1846
	case I915_TILING_NONE:
1956
	case I915_TILING_NONE:
1847
		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1957
		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1848
			alignment = 128 * 1024;
1958
			alignment = 128 * 1024;
1849
		else if (INTEL_INFO(dev)->gen >= 4)
1959
		else if (INTEL_INFO(dev)->gen >= 4)
1850
			alignment = 4 * 1024;
1960
			alignment = 4 * 1024;
1851
		else
1961
		else
1852
			alignment = 64 * 1024;
1962
			alignment = 64 * 1024;
1853
		break;
1963
		break;
1854
	case I915_TILING_X:
1964
	case I915_TILING_X:
1855
		/* pin() will align the object as required by fence */
1965
		/* pin() will align the object as required by fence */
1856
		alignment = 0;
1966
		alignment = 0;
1857
		break;
1967
		break;
1858
	case I915_TILING_Y:
1968
	case I915_TILING_Y:
1859
		/* Despite that we check this in framebuffer_init userspace can
-
 
1860
		 * screw us over and change the tiling after the fact. Only
-
 
1861
		 * pinned buffers can't change their tiling. */
-
 
1862
		DRM_DEBUG_DRIVER("Y tiled not allowed for scan out buffers\n");
1969
		WARN(1, "Y tiled bo slipped through, driver bug!\n");
1863
		return -EINVAL;
1970
		return -EINVAL;
1864
	default:
1971
	default:
1865
		BUG();
1972
		BUG();
1866
	}
1973
	}
1867
 
1974
 
1868
	/* Note that the w/a also requires 64 PTE of padding following the
1975
	/* Note that the w/a also requires 64 PTE of padding following the
1869
	 * bo. We currently fill all unused PTE with the shadow page and so
1976
	 * bo. We currently fill all unused PTE with the shadow page and so
1870
	 * we should always have valid PTE following the scanout preventing
1977
	 * we should always have valid PTE following the scanout preventing
1871
	 * the VT-d warning.
1978
	 * the VT-d warning.
1872
	 */
1979
	 */
1873
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
1980
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
1874
		alignment = 256 * 1024;
1981
		alignment = 256 * 1024;
1875
 
1982
 
1876
	dev_priv->mm.interruptible = false;
1983
	dev_priv->mm.interruptible = false;
1877
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
1984
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
1878
	if (ret)
1985
	if (ret)
1879
		goto err_interruptible;
1986
		goto err_interruptible;
1880
 
1987
 
1881
	/* Install a fence for tiled scan-out. Pre-i965 always needs a
1988
	/* Install a fence for tiled scan-out. Pre-i965 always needs a
1882
	 * fence, whereas 965+ only requires a fence if using
1989
	 * fence, whereas 965+ only requires a fence if using
1883
	 * framebuffer compression.  For simplicity, we always install
1990
	 * framebuffer compression.  For simplicity, we always install
1884
	 * a fence as the cost is not that onerous.
1991
	 * a fence as the cost is not that onerous.
1885
	 */
1992
	 */
1886
	ret = i915_gem_object_get_fence(obj);
1993
	ret = i915_gem_object_get_fence(obj);
1887
	if (ret)
1994
	if (ret)
1888
		goto err_unpin;
1995
		goto err_unpin;
1889
 
1996
 
1890
	i915_gem_object_pin_fence(obj);
1997
	i915_gem_object_pin_fence(obj);
1891
 
1998
 
1892
	dev_priv->mm.interruptible = true;
1999
	dev_priv->mm.interruptible = true;
1893
	return 0;
2000
	return 0;
1894
 
2001
 
1895
err_unpin:
2002
err_unpin:
1896
	i915_gem_object_unpin_from_display_plane(obj);
2003
	i915_gem_object_unpin_from_display_plane(obj);
1897
err_interruptible:
2004
err_interruptible:
1898
	dev_priv->mm.interruptible = true;
2005
	dev_priv->mm.interruptible = true;
1899
	return ret;
2006
	return ret;
1900
}
2007
}
1901
 
2008
 
1902
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
2009
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
1903
{
2010
{
1904
//	i915_gem_object_unpin_fence(obj);
2011
//	i915_gem_object_unpin_fence(obj);
1905
//	i915_gem_object_unpin(obj);
2012
//	i915_gem_object_unpin(obj);
1906
}
2013
}
1907
 
2014
 
1908
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
2015
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
1909
 * is assumed to be a power-of-two. */
2016
 * is assumed to be a power-of-two. */
1910
unsigned long intel_gen4_compute_page_offset(int *x, int *y,
2017
unsigned long intel_gen4_compute_page_offset(int *x, int *y,
1911
					     unsigned int tiling_mode,
2018
					     unsigned int tiling_mode,
1912
					     unsigned int cpp,
2019
					     unsigned int cpp,
1913
							unsigned int pitch)
2020
							unsigned int pitch)
1914
{
2021
{
1915
	if (tiling_mode != I915_TILING_NONE) {
2022
	if (tiling_mode != I915_TILING_NONE) {
1916
		unsigned int tile_rows, tiles;
2023
		unsigned int tile_rows, tiles;
1917
 
2024
 
1918
	tile_rows = *y / 8;
2025
	tile_rows = *y / 8;
1919
	*y %= 8;
2026
	*y %= 8;
1920
 
2027
 
1921
		tiles = *x / (512/cpp);
2028
		tiles = *x / (512/cpp);
1922
		*x %= 512/cpp;
2029
		*x %= 512/cpp;
1923
 
2030
 
1924
	return tile_rows * pitch * 8 + tiles * 4096;
2031
	return tile_rows * pitch * 8 + tiles * 4096;
1925
	} else {
2032
	} else {
1926
		unsigned int offset;
2033
		unsigned int offset;
1927
 
2034
 
1928
		offset = *y * pitch + *x * cpp;
2035
		offset = *y * pitch + *x * cpp;
1929
		*y = 0;
2036
		*y = 0;
1930
		*x = (offset & 4095) / cpp;
2037
		*x = (offset & 4095) / cpp;
1931
		return offset & -4096;
2038
		return offset & -4096;
1932
	}
2039
	}
1933
}
2040
}
1934
 
2041
 
1935
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2042
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1936
                 int x, int y)
2043
                 int x, int y)
1937
{
2044
{
1938
    struct drm_device *dev = crtc->dev;
2045
    struct drm_device *dev = crtc->dev;
1939
    struct drm_i915_private *dev_priv = dev->dev_private;
2046
    struct drm_i915_private *dev_priv = dev->dev_private;
1940
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2047
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1941
    struct intel_framebuffer *intel_fb;
2048
    struct intel_framebuffer *intel_fb;
1942
    struct drm_i915_gem_object *obj;
2049
    struct drm_i915_gem_object *obj;
1943
    int plane = intel_crtc->plane;
2050
    int plane = intel_crtc->plane;
1944
	unsigned long linear_offset;
2051
	unsigned long linear_offset;
1945
    u32 dspcntr;
2052
    u32 dspcntr;
1946
    u32 reg;
2053
    u32 reg;
1947
 
2054
 
1948
    switch (plane) {
2055
    switch (plane) {
1949
    case 0:
2056
    case 0:
1950
    case 1:
2057
    case 1:
1951
        break;
2058
        break;
1952
    default:
2059
    default:
1953
		DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
2060
		DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
1954
        return -EINVAL;
2061
        return -EINVAL;
1955
    }
2062
    }
1956
 
2063
 
1957
    intel_fb = to_intel_framebuffer(fb);
2064
    intel_fb = to_intel_framebuffer(fb);
1958
    obj = intel_fb->obj;
2065
    obj = intel_fb->obj;
1959
 
2066
 
1960
    reg = DSPCNTR(plane);
2067
    reg = DSPCNTR(plane);
1961
    dspcntr = I915_READ(reg);
2068
    dspcntr = I915_READ(reg);
1962
    /* Mask out pixel format bits in case we change it */
2069
    /* Mask out pixel format bits in case we change it */
1963
    dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2070
    dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
1964
	switch (fb->pixel_format) {
2071
	switch (fb->pixel_format) {
1965
	case DRM_FORMAT_C8:
2072
	case DRM_FORMAT_C8:
1966
        dspcntr |= DISPPLANE_8BPP;
2073
        dspcntr |= DISPPLANE_8BPP;
1967
        break;
2074
        break;
1968
	case DRM_FORMAT_XRGB1555:
2075
	case DRM_FORMAT_XRGB1555:
1969
	case DRM_FORMAT_ARGB1555:
2076
	case DRM_FORMAT_ARGB1555:
1970
		dspcntr |= DISPPLANE_BGRX555;
2077
		dspcntr |= DISPPLANE_BGRX555;
1971
		break;
2078
		break;
1972
	case DRM_FORMAT_RGB565:
2079
	case DRM_FORMAT_RGB565:
1973
		dspcntr |= DISPPLANE_BGRX565;
2080
		dspcntr |= DISPPLANE_BGRX565;
1974
		break;
2081
		break;
1975
	case DRM_FORMAT_XRGB8888:
2082
	case DRM_FORMAT_XRGB8888:
1976
	case DRM_FORMAT_ARGB8888:
2083
	case DRM_FORMAT_ARGB8888:
1977
		dspcntr |= DISPPLANE_BGRX888;
2084
		dspcntr |= DISPPLANE_BGRX888;
1978
		break;
2085
		break;
1979
	case DRM_FORMAT_XBGR8888:
2086
	case DRM_FORMAT_XBGR8888:
1980
	case DRM_FORMAT_ABGR8888:
2087
	case DRM_FORMAT_ABGR8888:
1981
		dspcntr |= DISPPLANE_RGBX888;
2088
		dspcntr |= DISPPLANE_RGBX888;
1982
		break;
2089
		break;
1983
	case DRM_FORMAT_XRGB2101010:
2090
	case DRM_FORMAT_XRGB2101010:
1984
	case DRM_FORMAT_ARGB2101010:
2091
	case DRM_FORMAT_ARGB2101010:
1985
		dspcntr |= DISPPLANE_BGRX101010;
2092
		dspcntr |= DISPPLANE_BGRX101010;
1986
        break;
2093
        break;
1987
	case DRM_FORMAT_XBGR2101010:
2094
	case DRM_FORMAT_XBGR2101010:
1988
	case DRM_FORMAT_ABGR2101010:
2095
	case DRM_FORMAT_ABGR2101010:
1989
		dspcntr |= DISPPLANE_RGBX101010;
2096
		dspcntr |= DISPPLANE_RGBX101010;
1990
        break;
2097
        break;
1991
    default:
2098
    default:
1992
		BUG();
2099
		BUG();
1993
    }
2100
    }
1994
 
2101
 
1995
    if (INTEL_INFO(dev)->gen >= 4) {
2102
    if (INTEL_INFO(dev)->gen >= 4) {
1996
        if (obj->tiling_mode != I915_TILING_NONE)
2103
        if (obj->tiling_mode != I915_TILING_NONE)
1997
            dspcntr |= DISPPLANE_TILED;
2104
            dspcntr |= DISPPLANE_TILED;
1998
        else
2105
        else
1999
            dspcntr &= ~DISPPLANE_TILED;
2106
            dspcntr &= ~DISPPLANE_TILED;
2000
    }
2107
    }
2001
 
2108
 
2002
	if (IS_G4X(dev))
2109
	if (IS_G4X(dev))
2003
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2110
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2004
 
2111
 
2005
    I915_WRITE(reg, dspcntr);
2112
    I915_WRITE(reg, dspcntr);
2006
 
2113
 
2007
	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2114
	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2008
 
2115
 
2009
	if (INTEL_INFO(dev)->gen >= 4) {
2116
	if (INTEL_INFO(dev)->gen >= 4) {
2010
		intel_crtc->dspaddr_offset =
2117
		intel_crtc->dspaddr_offset =
2011
			intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2118
			intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2012
							   fb->bits_per_pixel / 8,
2119
							   fb->bits_per_pixel / 8,
2013
							   fb->pitches[0]);
2120
							   fb->pitches[0]);
2014
		linear_offset -= intel_crtc->dspaddr_offset;
2121
		linear_offset -= intel_crtc->dspaddr_offset;
2015
	} else {
2122
	} else {
2016
		intel_crtc->dspaddr_offset = linear_offset;
2123
		intel_crtc->dspaddr_offset = linear_offset;
2017
	}
2124
	}
2018
 
2125
 
2019
	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2126
	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2020
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2127
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2021
		      fb->pitches[0]);
2128
		      fb->pitches[0]);
2022
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2129
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2023
    if (INTEL_INFO(dev)->gen >= 4) {
2130
    if (INTEL_INFO(dev)->gen >= 4) {
2024
		I915_MODIFY_DISPBASE(DSPSURF(plane),
2131
		I915_WRITE(DSPSURF(plane),
2025
				     i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2132
				     i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2026
        I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2133
        I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2027
		I915_WRITE(DSPLINOFF(plane), linear_offset);
2134
		I915_WRITE(DSPLINOFF(plane), linear_offset);
2028
    } else
2135
    } else
2029
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
2136
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
2030
    POSTING_READ(reg);
2137
    POSTING_READ(reg);
2031
 
2138
 
2032
    return 0;
2139
    return 0;
2033
}
2140
}
2034
 
2141
 
2035
static int ironlake_update_plane(struct drm_crtc *crtc,
2142
static int ironlake_update_plane(struct drm_crtc *crtc,
2036
                 struct drm_framebuffer *fb, int x, int y)
2143
                 struct drm_framebuffer *fb, int x, int y)
2037
{
2144
{
2038
    struct drm_device *dev = crtc->dev;
2145
    struct drm_device *dev = crtc->dev;
2039
    struct drm_i915_private *dev_priv = dev->dev_private;
2146
    struct drm_i915_private *dev_priv = dev->dev_private;
2040
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2147
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2041
    struct intel_framebuffer *intel_fb;
2148
    struct intel_framebuffer *intel_fb;
2042
    struct drm_i915_gem_object *obj;
2149
    struct drm_i915_gem_object *obj;
2043
    int plane = intel_crtc->plane;
2150
    int plane = intel_crtc->plane;
2044
	unsigned long linear_offset;
2151
	unsigned long linear_offset;
2045
    u32 dspcntr;
2152
    u32 dspcntr;
2046
    u32 reg;
2153
    u32 reg;
2047
 
2154
 
2048
    switch (plane) {
2155
    switch (plane) {
2049
    case 0:
2156
    case 0:
2050
    case 1:
2157
    case 1:
2051
	case 2:
2158
	case 2:
2052
        break;
2159
        break;
2053
    default:
2160
    default:
2054
		DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
2161
		DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
2055
        return -EINVAL;
2162
        return -EINVAL;
2056
    }
2163
    }
2057
 
2164
 
2058
    intel_fb = to_intel_framebuffer(fb);
2165
    intel_fb = to_intel_framebuffer(fb);
2059
    obj = intel_fb->obj;
2166
    obj = intel_fb->obj;
2060
 
2167
 
2061
    reg = DSPCNTR(plane);
2168
    reg = DSPCNTR(plane);
2062
    dspcntr = I915_READ(reg);
2169
    dspcntr = I915_READ(reg);
2063
    /* Mask out pixel format bits in case we change it */
2170
    /* Mask out pixel format bits in case we change it */
2064
    dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2171
    dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2065
	switch (fb->pixel_format) {
2172
	switch (fb->pixel_format) {
2066
	case DRM_FORMAT_C8:
2173
	case DRM_FORMAT_C8:
2067
        dspcntr |= DISPPLANE_8BPP;
2174
        dspcntr |= DISPPLANE_8BPP;
2068
        break;
2175
        break;
2069
	case DRM_FORMAT_RGB565:
2176
	case DRM_FORMAT_RGB565:
2070
		dspcntr |= DISPPLANE_BGRX565;
2177
		dspcntr |= DISPPLANE_BGRX565;
2071
        break;
2178
        break;
2072
	case DRM_FORMAT_XRGB8888:
2179
	case DRM_FORMAT_XRGB8888:
2073
	case DRM_FORMAT_ARGB8888:
2180
	case DRM_FORMAT_ARGB8888:
2074
		dspcntr |= DISPPLANE_BGRX888;
2181
		dspcntr |= DISPPLANE_BGRX888;
2075
		break;
2182
		break;
2076
	case DRM_FORMAT_XBGR8888:
2183
	case DRM_FORMAT_XBGR8888:
2077
	case DRM_FORMAT_ABGR8888:
2184
	case DRM_FORMAT_ABGR8888:
2078
		dspcntr |= DISPPLANE_RGBX888;
2185
		dspcntr |= DISPPLANE_RGBX888;
2079
		break;
2186
		break;
2080
	case DRM_FORMAT_XRGB2101010:
2187
	case DRM_FORMAT_XRGB2101010:
2081
	case DRM_FORMAT_ARGB2101010:
2188
	case DRM_FORMAT_ARGB2101010:
2082
		dspcntr |= DISPPLANE_BGRX101010;
2189
		dspcntr |= DISPPLANE_BGRX101010;
2083
		break;
2190
		break;
2084
	case DRM_FORMAT_XBGR2101010:
2191
	case DRM_FORMAT_XBGR2101010:
2085
	case DRM_FORMAT_ABGR2101010:
2192
	case DRM_FORMAT_ABGR2101010:
2086
		dspcntr |= DISPPLANE_RGBX101010;
2193
		dspcntr |= DISPPLANE_RGBX101010;
2087
        break;
2194
        break;
2088
    default:
2195
    default:
2089
		BUG();
2196
		BUG();
2090
    }
2197
    }
2091
 
2198
 
2092
	if (obj->tiling_mode != I915_TILING_NONE)
2199
	if (obj->tiling_mode != I915_TILING_NONE)
2093
		dspcntr |= DISPPLANE_TILED;
2200
		dspcntr |= DISPPLANE_TILED;
2094
	else
2201
	else
2095
        dspcntr &= ~DISPPLANE_TILED;
2202
        dspcntr &= ~DISPPLANE_TILED;
2096
 
2203
 
2097
	if (IS_HASWELL(dev))
2204
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2098
		dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
2205
		dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
2099
	else
2206
	else
2100
    dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2207
    dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2101
 
2208
 
2102
    I915_WRITE(reg, dspcntr);
2209
    I915_WRITE(reg, dspcntr);
2103
 
2210
 
2104
	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2211
	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2105
	intel_crtc->dspaddr_offset =
2212
	intel_crtc->dspaddr_offset =
2106
		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2213
		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2107
						   fb->bits_per_pixel / 8,
2214
						   fb->bits_per_pixel / 8,
2108
						   fb->pitches[0]);
2215
						   fb->pitches[0]);
2109
	linear_offset -= intel_crtc->dspaddr_offset;
2216
	linear_offset -= intel_crtc->dspaddr_offset;
2110
 
2217
 
2111
	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2218
	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2112
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2219
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2113
		      fb->pitches[0]);
2220
		      fb->pitches[0]);
2114
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2221
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2115
	I915_MODIFY_DISPBASE(DSPSURF(plane),
2222
	I915_WRITE(DSPSURF(plane),
2116
			     i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2223
			     i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2117
	if (IS_HASWELL(dev)) {
2224
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2118
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2225
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2119
	} else {
2226
	} else {
2120
	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2227
	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2121
	I915_WRITE(DSPLINOFF(plane), linear_offset);
2228
	I915_WRITE(DSPLINOFF(plane), linear_offset);
2122
	}
2229
	}
2123
	POSTING_READ(reg);
2230
	POSTING_READ(reg);
2124
 
2231
 
2125
    return 0;
2232
    return 0;
2126
}
2233
}
2127
 
2234
 
2128
/* Assume fb object is pinned & idle & fenced and just update base pointers */
2235
/* Assume fb object is pinned & idle & fenced and just update base pointers */
2129
static int
2236
static int
2130
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2237
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2131
			   int x, int y, enum mode_set_atomic state)
2238
			   int x, int y, enum mode_set_atomic state)
2132
{
2239
{
2133
	struct drm_device *dev = crtc->dev;
2240
	struct drm_device *dev = crtc->dev;
2134
	struct drm_i915_private *dev_priv = dev->dev_private;
2241
	struct drm_i915_private *dev_priv = dev->dev_private;
2135
 
2242
 
2136
	if (dev_priv->display.disable_fbc)
2243
	if (dev_priv->display.disable_fbc)
2137
		dev_priv->display.disable_fbc(dev);
2244
		dev_priv->display.disable_fbc(dev);
2138
	intel_increase_pllclock(crtc);
2245
	intel_increase_pllclock(crtc);
2139
 
2246
 
2140
	return dev_priv->display.update_plane(crtc, fb, x, y);
2247
	return dev_priv->display.update_plane(crtc, fb, x, y);
2141
}
2248
}
2142
 
2249
 
2143
#if 0
2250
#if 0
2144
void intel_display_handle_reset(struct drm_device *dev)
2251
void intel_display_handle_reset(struct drm_device *dev)
2145
{
2252
{
2146
	struct drm_i915_private *dev_priv = dev->dev_private;
2253
	struct drm_i915_private *dev_priv = dev->dev_private;
2147
	struct drm_crtc *crtc;
2254
	struct drm_crtc *crtc;
2148
 
2255
 
2149
	/*
2256
	/*
2150
	 * Flips in the rings have been nuked by the reset,
2257
	 * Flips in the rings have been nuked by the reset,
2151
	 * so complete all pending flips so that user space
2258
	 * so complete all pending flips so that user space
2152
	 * will get its events and not get stuck.
2259
	 * will get its events and not get stuck.
2153
	 *
2260
	 *
2154
	 * Also update the base address of all primary
2261
	 * Also update the base address of all primary
2155
	 * planes to the the last fb to make sure we're
2262
	 * planes to the the last fb to make sure we're
2156
	 * showing the correct fb after a reset.
2263
	 * showing the correct fb after a reset.
2157
	 *
2264
	 *
2158
	 * Need to make two loops over the crtcs so that we
2265
	 * Need to make two loops over the crtcs so that we
2159
	 * don't try to grab a crtc mutex before the
2266
	 * don't try to grab a crtc mutex before the
2160
	 * pending_flip_queue really got woken up.
2267
	 * pending_flip_queue really got woken up.
2161
	 */
2268
	 */
2162
 
2269
 
2163
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2270
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2164
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2271
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2165
		enum plane plane = intel_crtc->plane;
2272
		enum plane plane = intel_crtc->plane;
2166
 
2273
 
2167
		intel_prepare_page_flip(dev, plane);
2274
		intel_prepare_page_flip(dev, plane);
2168
		intel_finish_page_flip_plane(dev, plane);
2275
		intel_finish_page_flip_plane(dev, plane);
2169
	}
2276
	}
2170
 
2277
 
2171
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2278
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2172
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2279
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2173
 
2280
 
2174
		mutex_lock(&crtc->mutex);
2281
		mutex_lock(&crtc->mutex);
-
 
2282
		/*
-
 
2283
		 * FIXME: Once we have proper support for primary planes (and
-
 
2284
		 * disabling them without disabling the entire crtc) allow again
-
 
2285
		 * a NULL crtc->fb.
-
 
2286
		 */
2175
		if (intel_crtc->active)
2287
		if (intel_crtc->active && crtc->fb)
2176
			dev_priv->display.update_plane(crtc, crtc->fb,
2288
			dev_priv->display.update_plane(crtc, crtc->fb,
2177
						       crtc->x, crtc->y);
2289
						       crtc->x, crtc->y);
2178
		mutex_unlock(&crtc->mutex);
2290
		mutex_unlock(&crtc->mutex);
2179
	}
2291
	}
2180
}
2292
}
2181
 
2293
 
2182
static int
2294
static int
2183
intel_finish_fb(struct drm_framebuffer *old_fb)
2295
intel_finish_fb(struct drm_framebuffer *old_fb)
2184
{
2296
{
2185
	struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
2297
	struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
2186
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2298
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2187
	bool was_interruptible = dev_priv->mm.interruptible;
2299
	bool was_interruptible = dev_priv->mm.interruptible;
2188
	int ret;
2300
	int ret;
2189
 
2301
 
2190
	/* Big Hammer, we also need to ensure that any pending
2302
	/* Big Hammer, we also need to ensure that any pending
2191
	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2303
	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2192
	 * current scanout is retired before unpinning the old
2304
	 * current scanout is retired before unpinning the old
2193
	 * framebuffer.
2305
	 * framebuffer.
2194
	 *
2306
	 *
2195
	 * This should only fail upon a hung GPU, in which case we
2307
	 * This should only fail upon a hung GPU, in which case we
2196
	 * can safely continue.
2308
	 * can safely continue.
2197
	 */
2309
	 */
2198
	dev_priv->mm.interruptible = false;
2310
	dev_priv->mm.interruptible = false;
2199
	ret = i915_gem_object_finish_gpu(obj);
2311
	ret = i915_gem_object_finish_gpu(obj);
2200
	dev_priv->mm.interruptible = was_interruptible;
2312
	dev_priv->mm.interruptible = was_interruptible;
2201
 
2313
 
2202
	return ret;
2314
	return ret;
2203
}
2315
}
2204
 
2316
 
2205
static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
2317
static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
2206
{
2318
{
2207
	struct drm_device *dev = crtc->dev;
2319
	struct drm_device *dev = crtc->dev;
2208
	struct drm_i915_master_private *master_priv;
2320
	struct drm_i915_master_private *master_priv;
2209
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2321
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2210
 
2322
 
2211
	if (!dev->primary->master)
2323
	if (!dev->primary->master)
2212
		return;
2324
		return;
2213
 
2325
 
2214
	master_priv = dev->primary->master->driver_priv;
2326
	master_priv = dev->primary->master->driver_priv;
2215
	if (!master_priv->sarea_priv)
2327
	if (!master_priv->sarea_priv)
2216
		return;
2328
		return;
2217
 
2329
 
2218
	switch (intel_crtc->pipe) {
2330
	switch (intel_crtc->pipe) {
2219
	case 0:
2331
	case 0:
2220
		master_priv->sarea_priv->pipeA_x = x;
2332
		master_priv->sarea_priv->pipeA_x = x;
2221
		master_priv->sarea_priv->pipeA_y = y;
2333
		master_priv->sarea_priv->pipeA_y = y;
2222
		break;
2334
		break;
2223
	case 1:
2335
	case 1:
2224
		master_priv->sarea_priv->pipeB_x = x;
2336
		master_priv->sarea_priv->pipeB_x = x;
2225
		master_priv->sarea_priv->pipeB_y = y;
2337
		master_priv->sarea_priv->pipeB_y = y;
2226
		break;
2338
		break;
2227
	default:
2339
	default:
2228
		break;
2340
		break;
2229
	}
2341
	}
2230
}
2342
}
2231
#endif
2343
#endif
2232
 
2344
 
2233
static int
2345
static int
2234
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2346
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2235
		    struct drm_framebuffer *fb)
2347
		    struct drm_framebuffer *fb)
2236
{
2348
{
2237
	struct drm_device *dev = crtc->dev;
2349
	struct drm_device *dev = crtc->dev;
2238
	struct drm_i915_private *dev_priv = dev->dev_private;
2350
	struct drm_i915_private *dev_priv = dev->dev_private;
2239
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2351
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2240
	struct drm_framebuffer *old_fb;
2352
	struct drm_framebuffer *old_fb;
2241
	int ret;
2353
	int ret;
2242
 
2354
 
2243
	/* no fb bound */
2355
	/* no fb bound */
2244
	if (!fb) {
2356
	if (!fb) {
2245
		DRM_ERROR("No FB bound\n");
2357
		DRM_ERROR("No FB bound\n");
2246
		return 0;
2358
		return 0;
2247
	}
2359
	}
2248
 
2360
 
2249
	if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
2361
	if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
2250
		DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
2362
		DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
2251
			  plane_name(intel_crtc->plane),
2363
			  plane_name(intel_crtc->plane),
2252
				INTEL_INFO(dev)->num_pipes);
2364
				INTEL_INFO(dev)->num_pipes);
2253
		return -EINVAL;
2365
		return -EINVAL;
2254
	}
2366
	}
2255
 
2367
 
2256
	mutex_lock(&dev->struct_mutex);
2368
	mutex_lock(&dev->struct_mutex);
2257
    ret = intel_pin_and_fence_fb_obj(dev,
2369
    ret = intel_pin_and_fence_fb_obj(dev,
2258
                    to_intel_framebuffer(fb)->obj,
2370
                    to_intel_framebuffer(fb)->obj,
2259
                    NULL);
2371
                    NULL);
2260
    if (ret != 0) {
2372
    if (ret != 0) {
2261
       mutex_unlock(&dev->struct_mutex);
2373
       mutex_unlock(&dev->struct_mutex);
2262
       DRM_ERROR("pin & fence failed\n");
2374
       DRM_ERROR("pin & fence failed\n");
2263
       return ret;
2375
       return ret;
2264
    }
2376
    }
-
 
2377
 
2265
 
2378
	/*
-
 
2379
	 * Update pipe size and adjust fitter if needed: the reason for this is
-
 
2380
	 * that in compute_mode_changes we check the native mode (not the pfit
-
 
2381
	 * mode) to see if we can flip rather than do a full mode set. In the
-
 
2382
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
-
 
2383
	 * pfit state, we'll end up with a big fb scanned out into the wrong
-
 
2384
	 * sized surface.
-
 
2385
	 *
-
 
2386
	 * To fix this properly, we need to hoist the checks up into
-
 
2387
	 * compute_mode_changes (or above), check the actual pfit state and
-
 
2388
	 * whether the platform allows pfit disable with pipe active, and only
-
 
2389
	 * then update the pipesrc and pfit state, even on the flip path.
2266
	/* Update pipe size and adjust fitter if needed */
2390
	 */
-
 
2391
	if (i915_fastboot) {
-
 
2392
		const struct drm_display_mode *adjusted_mode =
-
 
2393
			&intel_crtc->config.adjusted_mode;
2267
	if (i915_fastboot) {
2394
 
2268
		I915_WRITE(PIPESRC(intel_crtc->pipe),
2395
		I915_WRITE(PIPESRC(intel_crtc->pipe),
2269
			   ((crtc->mode.hdisplay - 1) << 16) |
2396
			   ((adjusted_mode->crtc_hdisplay - 1) << 16) |
2270
			   (crtc->mode.vdisplay - 1));
2397
			   (adjusted_mode->crtc_vdisplay - 1));
2271
		if (!intel_crtc->config.pch_pfit.enabled &&
2398
		if (!intel_crtc->config.pch_pfit.enabled &&
2272
		    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
2399
		    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
2273
		     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2400
		     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2274
			I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
2401
			I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
2275
			I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
2402
			I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
2276
			I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
2403
			I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
2277
		}
2404
		}
-
 
2405
		intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
-
 
2406
		intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
2278
	}
2407
	}
2279
 
2408
 
2280
	ret = dev_priv->display.update_plane(crtc, fb, x, y);
2409
	ret = dev_priv->display.update_plane(crtc, fb, x, y);
2281
	if (ret) {
2410
	if (ret) {
2282
		intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
2411
		intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
2283
		mutex_unlock(&dev->struct_mutex);
2412
		mutex_unlock(&dev->struct_mutex);
2284
		DRM_ERROR("failed to update base address\n");
2413
		DRM_ERROR("failed to update base address\n");
2285
        return ret;
2414
        return ret;
2286
	}
2415
	}
2287
 
2416
 
2288
	old_fb = crtc->fb;
2417
	old_fb = crtc->fb;
2289
	crtc->fb = fb;
2418
	crtc->fb = fb;
2290
	crtc->x = x;
2419
	crtc->x = x;
2291
	crtc->y = y;
2420
	crtc->y = y;
2292
 
2421
 
2293
	if (old_fb) {
2422
	if (old_fb) {
2294
		if (intel_crtc->active && old_fb != fb)
2423
		if (intel_crtc->active && old_fb != fb)
2295
		intel_wait_for_vblank(dev, intel_crtc->pipe);
2424
		intel_wait_for_vblank(dev, intel_crtc->pipe);
2296
		intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
2425
		intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
2297
	}
2426
	}
2298
 
2427
 
2299
	intel_update_fbc(dev);
2428
	intel_update_fbc(dev);
2300
	intel_edp_psr_update(dev);
2429
	intel_edp_psr_update(dev);
2301
	mutex_unlock(&dev->struct_mutex);
2430
	mutex_unlock(&dev->struct_mutex);
2302
 
2431
 
2303
    return 0;
2432
    return 0;
2304
}
2433
}
2305
 
2434
 
2306
static void intel_fdi_normal_train(struct drm_crtc *crtc)
2435
static void intel_fdi_normal_train(struct drm_crtc *crtc)
2307
{
2436
{
2308
	struct drm_device *dev = crtc->dev;
2437
	struct drm_device *dev = crtc->dev;
2309
	struct drm_i915_private *dev_priv = dev->dev_private;
2438
	struct drm_i915_private *dev_priv = dev->dev_private;
2310
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2439
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2311
	int pipe = intel_crtc->pipe;
2440
	int pipe = intel_crtc->pipe;
2312
	u32 reg, temp;
2441
	u32 reg, temp;
2313
 
2442
 
2314
	/* enable normal train */
2443
	/* enable normal train */
2315
	reg = FDI_TX_CTL(pipe);
2444
	reg = FDI_TX_CTL(pipe);
2316
	temp = I915_READ(reg);
2445
	temp = I915_READ(reg);
2317
	if (IS_IVYBRIDGE(dev)) {
2446
	if (IS_IVYBRIDGE(dev)) {
2318
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2447
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2319
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
2448
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
2320
	} else {
2449
	} else {
2321
		temp &= ~FDI_LINK_TRAIN_NONE;
2450
		temp &= ~FDI_LINK_TRAIN_NONE;
2322
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
2451
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
2323
	}
2452
	}
2324
	I915_WRITE(reg, temp);
2453
	I915_WRITE(reg, temp);
2325
 
2454
 
2326
	reg = FDI_RX_CTL(pipe);
2455
	reg = FDI_RX_CTL(pipe);
2327
	temp = I915_READ(reg);
2456
	temp = I915_READ(reg);
2328
	if (HAS_PCH_CPT(dev)) {
2457
	if (HAS_PCH_CPT(dev)) {
2329
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2458
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2330
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2459
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2331
	} else {
2460
	} else {
2332
		temp &= ~FDI_LINK_TRAIN_NONE;
2461
		temp &= ~FDI_LINK_TRAIN_NONE;
2333
		temp |= FDI_LINK_TRAIN_NONE;
2462
		temp |= FDI_LINK_TRAIN_NONE;
2334
	}
2463
	}
2335
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2464
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2336
 
2465
 
2337
	/* wait one idle pattern time */
2466
	/* wait one idle pattern time */
2338
	POSTING_READ(reg);
2467
	POSTING_READ(reg);
2339
	udelay(1000);
2468
	udelay(1000);
2340
 
2469
 
2341
	/* IVB wants error correction enabled */
2470
	/* IVB wants error correction enabled */
2342
	if (IS_IVYBRIDGE(dev))
2471
	if (IS_IVYBRIDGE(dev))
2343
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2472
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2344
			   FDI_FE_ERRC_ENABLE);
2473
			   FDI_FE_ERRC_ENABLE);
2345
}
2474
}
2346
 
2475
 
2347
static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
2476
static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
2348
{
2477
{
2349
	return crtc->base.enabled && crtc->active &&
2478
	return crtc->base.enabled && crtc->active &&
2350
		crtc->config.has_pch_encoder;
2479
		crtc->config.has_pch_encoder;
2351
}
2480
}
2352
 
2481
 
2353
static void ivb_modeset_global_resources(struct drm_device *dev)
2482
static void ivb_modeset_global_resources(struct drm_device *dev)
2354
{
2483
{
2355
	struct drm_i915_private *dev_priv = dev->dev_private;
2484
	struct drm_i915_private *dev_priv = dev->dev_private;
2356
	struct intel_crtc *pipe_B_crtc =
2485
	struct intel_crtc *pipe_B_crtc =
2357
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
2486
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
2358
	struct intel_crtc *pipe_C_crtc =
2487
	struct intel_crtc *pipe_C_crtc =
2359
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
2488
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
2360
	uint32_t temp;
2489
	uint32_t temp;
2361
 
2490
 
2362
	/*
2491
	/*
2363
	 * When everything is off disable fdi C so that we could enable fdi B
2492
	 * When everything is off disable fdi C so that we could enable fdi B
2364
	 * with all lanes. Note that we don't care about enabled pipes without
2493
	 * with all lanes. Note that we don't care about enabled pipes without
2365
	 * an enabled pch encoder.
2494
	 * an enabled pch encoder.
2366
	 */
2495
	 */
2367
	if (!pipe_has_enabled_pch(pipe_B_crtc) &&
2496
	if (!pipe_has_enabled_pch(pipe_B_crtc) &&
2368
	    !pipe_has_enabled_pch(pipe_C_crtc)) {
2497
	    !pipe_has_enabled_pch(pipe_C_crtc)) {
2369
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
2498
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
2370
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
2499
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
2371
 
2500
 
2372
		temp = I915_READ(SOUTH_CHICKEN1);
2501
		temp = I915_READ(SOUTH_CHICKEN1);
2373
		temp &= ~FDI_BC_BIFURCATION_SELECT;
2502
		temp &= ~FDI_BC_BIFURCATION_SELECT;
2374
		DRM_DEBUG_KMS("disabling fdi C rx\n");
2503
		DRM_DEBUG_KMS("disabling fdi C rx\n");
2375
		I915_WRITE(SOUTH_CHICKEN1, temp);
2504
		I915_WRITE(SOUTH_CHICKEN1, temp);
2376
	}
2505
	}
2377
}
2506
}
2378
 
2507
 
2379
/* The FDI link training functions for ILK/Ibexpeak. */
2508
/* The FDI link training functions for ILK/Ibexpeak. */
2380
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2509
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2381
{
2510
{
2382
    struct drm_device *dev = crtc->dev;
2511
    struct drm_device *dev = crtc->dev;
2383
    struct drm_i915_private *dev_priv = dev->dev_private;
2512
    struct drm_i915_private *dev_priv = dev->dev_private;
2384
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2513
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2385
    int pipe = intel_crtc->pipe;
2514
    int pipe = intel_crtc->pipe;
2386
    int plane = intel_crtc->plane;
2515
    int plane = intel_crtc->plane;
2387
    u32 reg, temp, tries;
2516
    u32 reg, temp, tries;
2388
 
2517
 
2389
    /* FDI needs bits from pipe & plane first */
2518
    /* FDI needs bits from pipe & plane first */
2390
    assert_pipe_enabled(dev_priv, pipe);
2519
    assert_pipe_enabled(dev_priv, pipe);
2391
    assert_plane_enabled(dev_priv, plane);
2520
    assert_plane_enabled(dev_priv, plane);
2392
 
2521
 
2393
    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
2522
    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
2394
       for train result */
2523
       for train result */
2395
    reg = FDI_RX_IMR(pipe);
2524
    reg = FDI_RX_IMR(pipe);
2396
    temp = I915_READ(reg);
2525
    temp = I915_READ(reg);
2397
    temp &= ~FDI_RX_SYMBOL_LOCK;
2526
    temp &= ~FDI_RX_SYMBOL_LOCK;
2398
    temp &= ~FDI_RX_BIT_LOCK;
2527
    temp &= ~FDI_RX_BIT_LOCK;
2399
    I915_WRITE(reg, temp);
2528
    I915_WRITE(reg, temp);
2400
    I915_READ(reg);
2529
    I915_READ(reg);
2401
    udelay(150);
2530
    udelay(150);
2402
 
2531
 
2403
    /* enable CPU FDI TX and PCH FDI RX */
2532
    /* enable CPU FDI TX and PCH FDI RX */
2404
    reg = FDI_TX_CTL(pipe);
2533
    reg = FDI_TX_CTL(pipe);
2405
    temp = I915_READ(reg);
2534
    temp = I915_READ(reg);
2406
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
2535
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
2407
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2536
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2408
    temp &= ~FDI_LINK_TRAIN_NONE;
2537
    temp &= ~FDI_LINK_TRAIN_NONE;
2409
    temp |= FDI_LINK_TRAIN_PATTERN_1;
2538
    temp |= FDI_LINK_TRAIN_PATTERN_1;
2410
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
2539
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
2411
 
2540
 
2412
    reg = FDI_RX_CTL(pipe);
2541
    reg = FDI_RX_CTL(pipe);
2413
    temp = I915_READ(reg);
2542
    temp = I915_READ(reg);
2414
    temp &= ~FDI_LINK_TRAIN_NONE;
2543
    temp &= ~FDI_LINK_TRAIN_NONE;
2415
    temp |= FDI_LINK_TRAIN_PATTERN_1;
2544
    temp |= FDI_LINK_TRAIN_PATTERN_1;
2416
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
2545
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
2417
 
2546
 
2418
    POSTING_READ(reg);
2547
    POSTING_READ(reg);
2419
    udelay(150);
2548
    udelay(150);
2420
 
2549
 
2421
    /* Ironlake workaround, enable clock pointer after FDI enable*/
2550
    /* Ironlake workaround, enable clock pointer after FDI enable*/
2422
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2551
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2423
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2552
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2424
               FDI_RX_PHASE_SYNC_POINTER_EN);
2553
               FDI_RX_PHASE_SYNC_POINTER_EN);
2425
 
2554
 
2426
    reg = FDI_RX_IIR(pipe);
2555
    reg = FDI_RX_IIR(pipe);
2427
    for (tries = 0; tries < 5; tries++) {
2556
    for (tries = 0; tries < 5; tries++) {
2428
        temp = I915_READ(reg);
2557
        temp = I915_READ(reg);
2429
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2558
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2430
 
2559
 
2431
        if ((temp & FDI_RX_BIT_LOCK)) {
2560
        if ((temp & FDI_RX_BIT_LOCK)) {
2432
            DRM_DEBUG_KMS("FDI train 1 done.\n");
2561
            DRM_DEBUG_KMS("FDI train 1 done.\n");
2433
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2562
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2434
            break;
2563
            break;
2435
        }
2564
        }
2436
    }
2565
    }
2437
    if (tries == 5)
2566
    if (tries == 5)
2438
        DRM_ERROR("FDI train 1 fail!\n");
2567
        DRM_ERROR("FDI train 1 fail!\n");
2439
 
2568
 
2440
    /* Train 2 */
2569
    /* Train 2 */
2441
    reg = FDI_TX_CTL(pipe);
2570
    reg = FDI_TX_CTL(pipe);
2442
    temp = I915_READ(reg);
2571
    temp = I915_READ(reg);
2443
    temp &= ~FDI_LINK_TRAIN_NONE;
2572
    temp &= ~FDI_LINK_TRAIN_NONE;
2444
    temp |= FDI_LINK_TRAIN_PATTERN_2;
2573
    temp |= FDI_LINK_TRAIN_PATTERN_2;
2445
    I915_WRITE(reg, temp);
2574
    I915_WRITE(reg, temp);
2446
 
2575
 
2447
    reg = FDI_RX_CTL(pipe);
2576
    reg = FDI_RX_CTL(pipe);
2448
    temp = I915_READ(reg);
2577
    temp = I915_READ(reg);
2449
    temp &= ~FDI_LINK_TRAIN_NONE;
2578
    temp &= ~FDI_LINK_TRAIN_NONE;
2450
    temp |= FDI_LINK_TRAIN_PATTERN_2;
2579
    temp |= FDI_LINK_TRAIN_PATTERN_2;
2451
    I915_WRITE(reg, temp);
2580
    I915_WRITE(reg, temp);
2452
 
2581
 
2453
    POSTING_READ(reg);
2582
    POSTING_READ(reg);
2454
    udelay(150);
2583
    udelay(150);
2455
 
2584
 
2456
    reg = FDI_RX_IIR(pipe);
2585
    reg = FDI_RX_IIR(pipe);
2457
    for (tries = 0; tries < 5; tries++) {
2586
    for (tries = 0; tries < 5; tries++) {
2458
        temp = I915_READ(reg);
2587
        temp = I915_READ(reg);
2459
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2588
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2460
 
2589
 
2461
        if (temp & FDI_RX_SYMBOL_LOCK) {
2590
        if (temp & FDI_RX_SYMBOL_LOCK) {
2462
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2591
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2463
            DRM_DEBUG_KMS("FDI train 2 done.\n");
2592
            DRM_DEBUG_KMS("FDI train 2 done.\n");
2464
            break;
2593
            break;
2465
        }
2594
        }
2466
    }
2595
    }
2467
    if (tries == 5)
2596
    if (tries == 5)
2468
        DRM_ERROR("FDI train 2 fail!\n");
2597
        DRM_ERROR("FDI train 2 fail!\n");
2469
 
2598
 
2470
    DRM_DEBUG_KMS("FDI train done\n");
2599
    DRM_DEBUG_KMS("FDI train done\n");
2471
 
2600
 
2472
}
2601
}
2473
 
2602
 
2474
static const int snb_b_fdi_train_param[] = {
2603
static const int snb_b_fdi_train_param[] = {
2475
    FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2604
    FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2476
    FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2605
    FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2477
    FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2606
    FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2478
    FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2607
    FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2479
};
2608
};
2480
 
2609
 
2481
/* The FDI link training functions for SNB/Cougarpoint. */
2610
/* The FDI link training functions for SNB/Cougarpoint. */
2482
static void gen6_fdi_link_train(struct drm_crtc *crtc)
2611
static void gen6_fdi_link_train(struct drm_crtc *crtc)
2483
{
2612
{
2484
    struct drm_device *dev = crtc->dev;
2613
    struct drm_device *dev = crtc->dev;
2485
    struct drm_i915_private *dev_priv = dev->dev_private;
2614
    struct drm_i915_private *dev_priv = dev->dev_private;
2486
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2615
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2487
    int pipe = intel_crtc->pipe;
2616
    int pipe = intel_crtc->pipe;
2488
	u32 reg, temp, i, retry;
2617
	u32 reg, temp, i, retry;
2489
 
2618
 
2490
    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
2619
    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
2491
       for train result */
2620
       for train result */
2492
    reg = FDI_RX_IMR(pipe);
2621
    reg = FDI_RX_IMR(pipe);
2493
    temp = I915_READ(reg);
2622
    temp = I915_READ(reg);
2494
    temp &= ~FDI_RX_SYMBOL_LOCK;
2623
    temp &= ~FDI_RX_SYMBOL_LOCK;
2495
    temp &= ~FDI_RX_BIT_LOCK;
2624
    temp &= ~FDI_RX_BIT_LOCK;
2496
    I915_WRITE(reg, temp);
2625
    I915_WRITE(reg, temp);
2497
 
2626
 
2498
    POSTING_READ(reg);
2627
    POSTING_READ(reg);
2499
    udelay(150);
2628
    udelay(150);
2500
 
2629
 
2501
    /* enable CPU FDI TX and PCH FDI RX */
2630
    /* enable CPU FDI TX and PCH FDI RX */
2502
    reg = FDI_TX_CTL(pipe);
2631
    reg = FDI_TX_CTL(pipe);
2503
    temp = I915_READ(reg);
2632
    temp = I915_READ(reg);
2504
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
2633
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
2505
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2634
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2506
    temp &= ~FDI_LINK_TRAIN_NONE;
2635
    temp &= ~FDI_LINK_TRAIN_NONE;
2507
    temp |= FDI_LINK_TRAIN_PATTERN_1;
2636
    temp |= FDI_LINK_TRAIN_PATTERN_1;
2508
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2637
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2509
    /* SNB-B */
2638
    /* SNB-B */
2510
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2639
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2511
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
2640
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
2512
 
2641
 
2513
	I915_WRITE(FDI_RX_MISC(pipe),
2642
	I915_WRITE(FDI_RX_MISC(pipe),
2514
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
2643
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
2515
 
2644
 
2516
    reg = FDI_RX_CTL(pipe);
2645
    reg = FDI_RX_CTL(pipe);
2517
    temp = I915_READ(reg);
2646
    temp = I915_READ(reg);
2518
    if (HAS_PCH_CPT(dev)) {
2647
    if (HAS_PCH_CPT(dev)) {
2519
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2648
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2520
        temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2649
        temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2521
    } else {
2650
    } else {
2522
        temp &= ~FDI_LINK_TRAIN_NONE;
2651
        temp &= ~FDI_LINK_TRAIN_NONE;
2523
        temp |= FDI_LINK_TRAIN_PATTERN_1;
2652
        temp |= FDI_LINK_TRAIN_PATTERN_1;
2524
    }
2653
    }
2525
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
2654
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
2526
 
2655
 
2527
    POSTING_READ(reg);
2656
    POSTING_READ(reg);
2528
    udelay(150);
2657
    udelay(150);
2529
 
2658
 
2530
	for (i = 0; i < 4; i++) {
2659
	for (i = 0; i < 4; i++) {
2531
        reg = FDI_TX_CTL(pipe);
2660
        reg = FDI_TX_CTL(pipe);
2532
        temp = I915_READ(reg);
2661
        temp = I915_READ(reg);
2533
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2662
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2534
        temp |= snb_b_fdi_train_param[i];
2663
        temp |= snb_b_fdi_train_param[i];
2535
        I915_WRITE(reg, temp);
2664
        I915_WRITE(reg, temp);
2536
 
2665
 
2537
        POSTING_READ(reg);
2666
        POSTING_READ(reg);
2538
        udelay(500);
2667
        udelay(500);
2539
 
2668
 
2540
		for (retry = 0; retry < 5; retry++) {
2669
		for (retry = 0; retry < 5; retry++) {
2541
        reg = FDI_RX_IIR(pipe);
2670
        reg = FDI_RX_IIR(pipe);
2542
        temp = I915_READ(reg);
2671
        temp = I915_READ(reg);
2543
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2672
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2544
        if (temp & FDI_RX_BIT_LOCK) {
2673
        if (temp & FDI_RX_BIT_LOCK) {
2545
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2674
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2546
            DRM_DEBUG_KMS("FDI train 1 done.\n");
2675
            DRM_DEBUG_KMS("FDI train 1 done.\n");
2547
            break;
2676
            break;
2548
        }
2677
        }
2549
			udelay(50);
2678
			udelay(50);
2550
		}
2679
		}
2551
		if (retry < 5)
2680
		if (retry < 5)
2552
			break;
2681
			break;
2553
    }
2682
    }
2554
    if (i == 4)
2683
    if (i == 4)
2555
        DRM_ERROR("FDI train 1 fail!\n");
2684
        DRM_ERROR("FDI train 1 fail!\n");
2556
 
2685
 
2557
    /* Train 2 */
2686
    /* Train 2 */
2558
    reg = FDI_TX_CTL(pipe);
2687
    reg = FDI_TX_CTL(pipe);
2559
    temp = I915_READ(reg);
2688
    temp = I915_READ(reg);
2560
    temp &= ~FDI_LINK_TRAIN_NONE;
2689
    temp &= ~FDI_LINK_TRAIN_NONE;
2561
    temp |= FDI_LINK_TRAIN_PATTERN_2;
2690
    temp |= FDI_LINK_TRAIN_PATTERN_2;
2562
    if (IS_GEN6(dev)) {
2691
    if (IS_GEN6(dev)) {
2563
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2692
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2564
        /* SNB-B */
2693
        /* SNB-B */
2565
        temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2694
        temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2566
    }
2695
    }
2567
    I915_WRITE(reg, temp);
2696
    I915_WRITE(reg, temp);
2568
 
2697
 
2569
    reg = FDI_RX_CTL(pipe);
2698
    reg = FDI_RX_CTL(pipe);
2570
    temp = I915_READ(reg);
2699
    temp = I915_READ(reg);
2571
    if (HAS_PCH_CPT(dev)) {
2700
    if (HAS_PCH_CPT(dev)) {
2572
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2701
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2573
        temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2702
        temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2574
    } else {
2703
    } else {
2575
        temp &= ~FDI_LINK_TRAIN_NONE;
2704
        temp &= ~FDI_LINK_TRAIN_NONE;
2576
        temp |= FDI_LINK_TRAIN_PATTERN_2;
2705
        temp |= FDI_LINK_TRAIN_PATTERN_2;
2577
    }
2706
    }
2578
    I915_WRITE(reg, temp);
2707
    I915_WRITE(reg, temp);
2579
 
2708
 
2580
    POSTING_READ(reg);
2709
    POSTING_READ(reg);
2581
    udelay(150);
2710
    udelay(150);
2582
 
2711
 
2583
	for (i = 0; i < 4; i++) {
2712
	for (i = 0; i < 4; i++) {
2584
        reg = FDI_TX_CTL(pipe);
2713
        reg = FDI_TX_CTL(pipe);
2585
        temp = I915_READ(reg);
2714
        temp = I915_READ(reg);
2586
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2715
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2587
        temp |= snb_b_fdi_train_param[i];
2716
        temp |= snb_b_fdi_train_param[i];
2588
        I915_WRITE(reg, temp);
2717
        I915_WRITE(reg, temp);
2589
 
2718
 
2590
        POSTING_READ(reg);
2719
        POSTING_READ(reg);
2591
        udelay(500);
2720
        udelay(500);
2592
 
2721
 
2593
		for (retry = 0; retry < 5; retry++) {
2722
		for (retry = 0; retry < 5; retry++) {
2594
        reg = FDI_RX_IIR(pipe);
2723
        reg = FDI_RX_IIR(pipe);
2595
        temp = I915_READ(reg);
2724
        temp = I915_READ(reg);
2596
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2725
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2597
        if (temp & FDI_RX_SYMBOL_LOCK) {
2726
        if (temp & FDI_RX_SYMBOL_LOCK) {
2598
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2727
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2599
            DRM_DEBUG_KMS("FDI train 2 done.\n");
2728
            DRM_DEBUG_KMS("FDI train 2 done.\n");
2600
            break;
2729
            break;
2601
        }
2730
        }
2602
			udelay(50);
2731
			udelay(50);
2603
		}
2732
		}
2604
		if (retry < 5)
2733
		if (retry < 5)
2605
			break;
2734
			break;
2606
    }
2735
    }
2607
    if (i == 4)
2736
    if (i == 4)
2608
        DRM_ERROR("FDI train 2 fail!\n");
2737
        DRM_ERROR("FDI train 2 fail!\n");
2609
 
2738
 
2610
    DRM_DEBUG_KMS("FDI train done.\n");
2739
    DRM_DEBUG_KMS("FDI train done.\n");
2611
}
2740
}
2612
 
2741
 
2613
/* Manual link training for Ivy Bridge A0 parts */
2742
/* Manual link training for Ivy Bridge A0 parts */
2614
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2743
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2615
{
2744
{
2616
    struct drm_device *dev = crtc->dev;
2745
    struct drm_device *dev = crtc->dev;
2617
    struct drm_i915_private *dev_priv = dev->dev_private;
2746
    struct drm_i915_private *dev_priv = dev->dev_private;
2618
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2747
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2619
    int pipe = intel_crtc->pipe;
2748
    int pipe = intel_crtc->pipe;
2620
	u32 reg, temp, i, j;
2749
	u32 reg, temp, i, j;
2621
 
2750
 
2622
    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
2751
    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
2623
       for train result */
2752
       for train result */
2624
    reg = FDI_RX_IMR(pipe);
2753
    reg = FDI_RX_IMR(pipe);
2625
    temp = I915_READ(reg);
2754
    temp = I915_READ(reg);
2626
    temp &= ~FDI_RX_SYMBOL_LOCK;
2755
    temp &= ~FDI_RX_SYMBOL_LOCK;
2627
    temp &= ~FDI_RX_BIT_LOCK;
2756
    temp &= ~FDI_RX_BIT_LOCK;
2628
    I915_WRITE(reg, temp);
2757
    I915_WRITE(reg, temp);
2629
 
2758
 
2630
    POSTING_READ(reg);
2759
    POSTING_READ(reg);
2631
    udelay(150);
2760
    udelay(150);
2632
 
2761
 
2633
	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
2762
	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
2634
		      I915_READ(FDI_RX_IIR(pipe)));
2763
		      I915_READ(FDI_RX_IIR(pipe)));
2635
 
2764
 
2636
	/* Try each vswing and preemphasis setting twice before moving on */
2765
	/* Try each vswing and preemphasis setting twice before moving on */
2637
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
2766
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
2638
		/* disable first in case we need to retry */
2767
		/* disable first in case we need to retry */
2639
		reg = FDI_TX_CTL(pipe);
2768
		reg = FDI_TX_CTL(pipe);
2640
		temp = I915_READ(reg);
2769
		temp = I915_READ(reg);
2641
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
2770
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
2642
		temp &= ~FDI_TX_ENABLE;
2771
		temp &= ~FDI_TX_ENABLE;
2643
		I915_WRITE(reg, temp);
2772
		I915_WRITE(reg, temp);
2644
 
2773
 
2645
		reg = FDI_RX_CTL(pipe);
2774
		reg = FDI_RX_CTL(pipe);
2646
		temp = I915_READ(reg);
2775
		temp = I915_READ(reg);
2647
		temp &= ~FDI_LINK_TRAIN_AUTO;
2776
		temp &= ~FDI_LINK_TRAIN_AUTO;
2648
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2777
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2649
		temp &= ~FDI_RX_ENABLE;
2778
		temp &= ~FDI_RX_ENABLE;
2650
		I915_WRITE(reg, temp);
2779
		I915_WRITE(reg, temp);
2651
 
2780
 
2652
    /* enable CPU FDI TX and PCH FDI RX */
2781
    /* enable CPU FDI TX and PCH FDI RX */
2653
    reg = FDI_TX_CTL(pipe);
2782
    reg = FDI_TX_CTL(pipe);
2654
    temp = I915_READ(reg);
2783
    temp = I915_READ(reg);
2655
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
2784
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
2656
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2785
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2657
    temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
2786
    temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
2658
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2787
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2659
		temp |= snb_b_fdi_train_param[j/2];
2788
		temp |= snb_b_fdi_train_param[j/2];
2660
	temp |= FDI_COMPOSITE_SYNC;
2789
	temp |= FDI_COMPOSITE_SYNC;
2661
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
2790
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
2662
 
2791
 
2663
	I915_WRITE(FDI_RX_MISC(pipe),
2792
	I915_WRITE(FDI_RX_MISC(pipe),
2664
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
2793
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
2665
 
2794
 
2666
    reg = FDI_RX_CTL(pipe);
2795
    reg = FDI_RX_CTL(pipe);
2667
    temp = I915_READ(reg);
2796
    temp = I915_READ(reg);
2668
    temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2797
    temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2669
	temp |= FDI_COMPOSITE_SYNC;
2798
	temp |= FDI_COMPOSITE_SYNC;
2670
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
2799
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
2671
 
2800
 
2672
    POSTING_READ(reg);
2801
    POSTING_READ(reg);
2673
		udelay(1); /* should be 0.5us */
2802
		udelay(1); /* should be 0.5us */
2674
 
2803
 
2675
	for (i = 0; i < 4; i++) {
2804
	for (i = 0; i < 4; i++) {
2676
        reg = FDI_RX_IIR(pipe);
2805
        reg = FDI_RX_IIR(pipe);
2677
        temp = I915_READ(reg);
2806
        temp = I915_READ(reg);
2678
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2807
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2679
 
2808
 
2680
        if (temp & FDI_RX_BIT_LOCK ||
2809
        if (temp & FDI_RX_BIT_LOCK ||
2681
            (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2810
            (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2682
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2811
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2683
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
2812
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
2684
					      i);
2813
					      i);
2685
            break;
2814
            break;
2686
        }
2815
        }
2687
			udelay(1); /* should be 0.5us */
2816
			udelay(1); /* should be 0.5us */
2688
		}
2817
		}
2689
		if (i == 4) {
2818
		if (i == 4) {
2690
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
2819
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
2691
			continue;
2820
			continue;
2692
    }
2821
    }
2693
 
2822
 
2694
    /* Train 2 */
2823
    /* Train 2 */
2695
    reg = FDI_TX_CTL(pipe);
2824
    reg = FDI_TX_CTL(pipe);
2696
    temp = I915_READ(reg);
2825
    temp = I915_READ(reg);
2697
    temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2826
    temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2698
    temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
2827
    temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
2699
    I915_WRITE(reg, temp);
2828
    I915_WRITE(reg, temp);
2700
 
2829
 
2701
    reg = FDI_RX_CTL(pipe);
2830
    reg = FDI_RX_CTL(pipe);
2702
    temp = I915_READ(reg);
2831
    temp = I915_READ(reg);
2703
    temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2832
    temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2704
    temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2833
    temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2705
    I915_WRITE(reg, temp);
2834
    I915_WRITE(reg, temp);
2706
 
2835
 
2707
    POSTING_READ(reg);
2836
    POSTING_READ(reg);
2708
		udelay(2); /* should be 1.5us */
2837
		udelay(2); /* should be 1.5us */
2709
 
2838
 
2710
	for (i = 0; i < 4; i++) {
2839
	for (i = 0; i < 4; i++) {
2711
        reg = FDI_RX_IIR(pipe);
2840
        reg = FDI_RX_IIR(pipe);
2712
        temp = I915_READ(reg);
2841
        temp = I915_READ(reg);
2713
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2842
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2714
 
2843
 
2715
			if (temp & FDI_RX_SYMBOL_LOCK ||
2844
			if (temp & FDI_RX_SYMBOL_LOCK ||
2716
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
2845
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
2717
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2846
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2718
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
2847
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
2719
					      i);
2848
					      i);
2720
				goto train_done;
2849
				goto train_done;
2721
        }
2850
        }
2722
			udelay(2); /* should be 1.5us */
2851
			udelay(2); /* should be 1.5us */
2723
    }
2852
    }
2724
    if (i == 4)
2853
    if (i == 4)
2725
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
2854
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
2726
	}
2855
	}
2727
 
2856
 
2728
train_done:
2857
train_done:
2729
    DRM_DEBUG_KMS("FDI train done.\n");
2858
    DRM_DEBUG_KMS("FDI train done.\n");
2730
}
2859
}
2731
 
2860
 
2732
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
2861
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
2733
{
2862
{
2734
	struct drm_device *dev = intel_crtc->base.dev;
2863
	struct drm_device *dev = intel_crtc->base.dev;
2735
	struct drm_i915_private *dev_priv = dev->dev_private;
2864
	struct drm_i915_private *dev_priv = dev->dev_private;
2736
	int pipe = intel_crtc->pipe;
2865
	int pipe = intel_crtc->pipe;
2737
	u32 reg, temp;
2866
	u32 reg, temp;
2738
 
2867
 
2739
 
2868
 
2740
	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
2869
	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
2741
	reg = FDI_RX_CTL(pipe);
2870
	reg = FDI_RX_CTL(pipe);
2742
	temp = I915_READ(reg);
2871
	temp = I915_READ(reg);
2743
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
2872
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
2744
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2873
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2745
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
2874
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
2746
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
2875
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
2747
 
2876
 
2748
	POSTING_READ(reg);
2877
	POSTING_READ(reg);
2749
	udelay(200);
2878
	udelay(200);
2750
 
2879
 
2751
	/* Switch from Rawclk to PCDclk */
2880
	/* Switch from Rawclk to PCDclk */
2752
	temp = I915_READ(reg);
2881
	temp = I915_READ(reg);
2753
	I915_WRITE(reg, temp | FDI_PCDCLK);
2882
	I915_WRITE(reg, temp | FDI_PCDCLK);
2754
 
2883
 
2755
	POSTING_READ(reg);
2884
	POSTING_READ(reg);
2756
	udelay(200);
2885
	udelay(200);
2757
 
2886
 
2758
	/* Enable CPU FDI TX PLL, always on for Ironlake */
2887
	/* Enable CPU FDI TX PLL, always on for Ironlake */
2759
	reg = FDI_TX_CTL(pipe);
2888
	reg = FDI_TX_CTL(pipe);
2760
	temp = I915_READ(reg);
2889
	temp = I915_READ(reg);
2761
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
2890
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
2762
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
2891
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
2763
 
2892
 
2764
		POSTING_READ(reg);
2893
		POSTING_READ(reg);
2765
		udelay(100);
2894
		udelay(100);
2766
	}
2895
	}
2767
}
2896
}
2768
 
2897
 
2769
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
2898
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
2770
{
2899
{
2771
	struct drm_device *dev = intel_crtc->base.dev;
2900
	struct drm_device *dev = intel_crtc->base.dev;
2772
	struct drm_i915_private *dev_priv = dev->dev_private;
2901
	struct drm_i915_private *dev_priv = dev->dev_private;
2773
	int pipe = intel_crtc->pipe;
2902
	int pipe = intel_crtc->pipe;
2774
	u32 reg, temp;
2903
	u32 reg, temp;
2775
 
2904
 
2776
	/* Switch from PCDclk to Rawclk */
2905
	/* Switch from PCDclk to Rawclk */
2777
	reg = FDI_RX_CTL(pipe);
2906
	reg = FDI_RX_CTL(pipe);
2778
	temp = I915_READ(reg);
2907
	temp = I915_READ(reg);
2779
	I915_WRITE(reg, temp & ~FDI_PCDCLK);
2908
	I915_WRITE(reg, temp & ~FDI_PCDCLK);
2780
 
2909
 
2781
	/* Disable CPU FDI TX PLL */
2910
	/* Disable CPU FDI TX PLL */
2782
	reg = FDI_TX_CTL(pipe);
2911
	reg = FDI_TX_CTL(pipe);
2783
	temp = I915_READ(reg);
2912
	temp = I915_READ(reg);
2784
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
2913
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
2785
 
2914
 
2786
	POSTING_READ(reg);
2915
	POSTING_READ(reg);
2787
	udelay(100);
2916
	udelay(100);
2788
 
2917
 
2789
	reg = FDI_RX_CTL(pipe);
2918
	reg = FDI_RX_CTL(pipe);
2790
	temp = I915_READ(reg);
2919
	temp = I915_READ(reg);
2791
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
2920
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
2792
 
2921
 
2793
	/* Wait for the clocks to turn off. */
2922
	/* Wait for the clocks to turn off. */
2794
	POSTING_READ(reg);
2923
	POSTING_READ(reg);
2795
	udelay(100);
2924
	udelay(100);
2796
}
2925
}
2797
 
2926
 
2798
static void ironlake_fdi_disable(struct drm_crtc *crtc)
2927
static void ironlake_fdi_disable(struct drm_crtc *crtc)
2799
{
2928
{
2800
	struct drm_device *dev = crtc->dev;
2929
	struct drm_device *dev = crtc->dev;
2801
	struct drm_i915_private *dev_priv = dev->dev_private;
2930
	struct drm_i915_private *dev_priv = dev->dev_private;
2802
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2931
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2803
	int pipe = intel_crtc->pipe;
2932
	int pipe = intel_crtc->pipe;
2804
	u32 reg, temp;
2933
	u32 reg, temp;
2805
 
2934
 
2806
	/* disable CPU FDI tx and PCH FDI rx */
2935
	/* disable CPU FDI tx and PCH FDI rx */
2807
	reg = FDI_TX_CTL(pipe);
2936
	reg = FDI_TX_CTL(pipe);
2808
	temp = I915_READ(reg);
2937
	temp = I915_READ(reg);
2809
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2938
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2810
	POSTING_READ(reg);
2939
	POSTING_READ(reg);
2811
 
2940
 
2812
	reg = FDI_RX_CTL(pipe);
2941
	reg = FDI_RX_CTL(pipe);
2813
	temp = I915_READ(reg);
2942
	temp = I915_READ(reg);
2814
	temp &= ~(0x7 << 16);
2943
	temp &= ~(0x7 << 16);
2815
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
2944
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
2816
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2945
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2817
 
2946
 
2818
	POSTING_READ(reg);
2947
	POSTING_READ(reg);
2819
	udelay(100);
2948
	udelay(100);
2820
 
2949
 
2821
	/* Ironlake workaround, disable clock pointer after downing FDI */
2950
	/* Ironlake workaround, disable clock pointer after downing FDI */
2822
	if (HAS_PCH_IBX(dev)) {
2951
	if (HAS_PCH_IBX(dev)) {
2823
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2952
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2824
	}
2953
	}
2825
 
2954
 
2826
	/* still set train pattern 1 */
2955
	/* still set train pattern 1 */
2827
	reg = FDI_TX_CTL(pipe);
2956
	reg = FDI_TX_CTL(pipe);
2828
	temp = I915_READ(reg);
2957
	temp = I915_READ(reg);
2829
	temp &= ~FDI_LINK_TRAIN_NONE;
2958
	temp &= ~FDI_LINK_TRAIN_NONE;
2830
	temp |= FDI_LINK_TRAIN_PATTERN_1;
2959
	temp |= FDI_LINK_TRAIN_PATTERN_1;
2831
	I915_WRITE(reg, temp);
2960
	I915_WRITE(reg, temp);
2832
 
2961
 
2833
	reg = FDI_RX_CTL(pipe);
2962
	reg = FDI_RX_CTL(pipe);
2834
	temp = I915_READ(reg);
2963
	temp = I915_READ(reg);
2835
	if (HAS_PCH_CPT(dev)) {
2964
	if (HAS_PCH_CPT(dev)) {
2836
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2965
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2837
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2966
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2838
	} else {
2967
	} else {
2839
		temp &= ~FDI_LINK_TRAIN_NONE;
2968
		temp &= ~FDI_LINK_TRAIN_NONE;
2840
		temp |= FDI_LINK_TRAIN_PATTERN_1;
2969
		temp |= FDI_LINK_TRAIN_PATTERN_1;
2841
	}
2970
	}
2842
	/* BPC in FDI rx is consistent with that in PIPECONF */
2971
	/* BPC in FDI rx is consistent with that in PIPECONF */
2843
	temp &= ~(0x07 << 16);
2972
	temp &= ~(0x07 << 16);
2844
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
2973
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
2845
	I915_WRITE(reg, temp);
2974
	I915_WRITE(reg, temp);
2846
 
2975
 
2847
	POSTING_READ(reg);
2976
	POSTING_READ(reg);
2848
	udelay(100);
2977
	udelay(100);
2849
}
2978
}
2850
 
2979
 
2851
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2980
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2852
{
2981
{
2853
	struct drm_device *dev = crtc->dev;
2982
	struct drm_device *dev = crtc->dev;
2854
	struct drm_i915_private *dev_priv = dev->dev_private;
2983
	struct drm_i915_private *dev_priv = dev->dev_private;
2855
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2984
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2856
	unsigned long flags;
2985
	unsigned long flags;
2857
	bool pending;
2986
	bool pending;
2858
 
2987
 
2859
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
2988
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
2860
	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
2989
	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
2861
		return false;
2990
		return false;
2862
 
2991
 
2863
	spin_lock_irqsave(&dev->event_lock, flags);
2992
	spin_lock_irqsave(&dev->event_lock, flags);
2864
	pending = to_intel_crtc(crtc)->unpin_work != NULL;
2993
	pending = to_intel_crtc(crtc)->unpin_work != NULL;
2865
	spin_unlock_irqrestore(&dev->event_lock, flags);
2994
	spin_unlock_irqrestore(&dev->event_lock, flags);
2866
 
2995
 
2867
	return pending;
2996
	return pending;
2868
}
2997
}
2869
 
2998
 
2870
#if 0
/* Block until any queued page flip on this CRTC has completed, then
 * finish rendering on the current framebuffer.  Currently compiled out
 * in this port (see the surrounding #if 0). */
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (crtc->fb == NULL)
		return;

	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));

	wait_event(dev_priv->pending_flip_queue,
		   !intel_crtc_has_pending_flip(crtc));

	mutex_lock(&dev->struct_mutex);
	intel_finish_fb(crtc->fb);
	mutex_unlock(&dev->struct_mutex);
}
#endif
2889
 
3018
 
2890
/* Program iCLKIP clock to the desired frequency */
3019
/* Program iCLKIP clock to the desired frequency */
2891
static void lpt_program_iclkip(struct drm_crtc *crtc)
3020
static void lpt_program_iclkip(struct drm_crtc *crtc)
2892
{
3021
{
2893
	struct drm_device *dev = crtc->dev;
3022
	struct drm_device *dev = crtc->dev;
2894
	struct drm_i915_private *dev_priv = dev->dev_private;
3023
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
3024
	int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
2895
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
3025
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
2896
	u32 temp;
3026
	u32 temp;
2897
 
3027
 
2898
	mutex_lock(&dev_priv->dpio_lock);
3028
	mutex_lock(&dev_priv->dpio_lock);
2899
 
3029
 
2900
	/* It is necessary to ungate the pixclk gate prior to programming
3030
	/* It is necessary to ungate the pixclk gate prior to programming
2901
	 * the divisors, and gate it back when it is done.
3031
	 * the divisors, and gate it back when it is done.
2902
	 */
3032
	 */
2903
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
3033
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
2904
 
3034
 
2905
	/* Disable SSCCTL */
3035
	/* Disable SSCCTL */
2906
	intel_sbi_write(dev_priv, SBI_SSCCTL6,
3036
	intel_sbi_write(dev_priv, SBI_SSCCTL6,
2907
			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
3037
			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
2908
				SBI_SSCCTL_DISABLE,
3038
				SBI_SSCCTL_DISABLE,
2909
			SBI_ICLK);
3039
			SBI_ICLK);
2910
 
3040
 
2911
	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
3041
	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
2912
	if (crtc->mode.clock == 20000) {
3042
	if (clock == 20000) {
2913
		auxdiv = 1;
3043
		auxdiv = 1;
2914
		divsel = 0x41;
3044
		divsel = 0x41;
2915
		phaseinc = 0x20;
3045
		phaseinc = 0x20;
2916
	} else {
3046
	} else {
2917
		/* The iCLK virtual clock root frequency is in MHz,
3047
		/* The iCLK virtual clock root frequency is in MHz,
2918
		 * but the crtc->mode.clock in in KHz. To get the divisors,
3048
		 * but the adjusted_mode->crtc_clock in in KHz. To get the
2919
		 * it is necessary to divide one by another, so we
3049
		 * divisors, it is necessary to divide one by another, so we
2920
		 * convert the virtual clock precision to KHz here for higher
3050
		 * convert the virtual clock precision to KHz here for higher
2921
		 * precision.
3051
		 * precision.
2922
		 */
3052
		 */
2923
		u32 iclk_virtual_root_freq = 172800 * 1000;
3053
		u32 iclk_virtual_root_freq = 172800 * 1000;
2924
		u32 iclk_pi_range = 64;
3054
		u32 iclk_pi_range = 64;
2925
		u32 desired_divisor, msb_divisor_value, pi_value;
3055
		u32 desired_divisor, msb_divisor_value, pi_value;
2926
 
3056
 
2927
		desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
3057
		desired_divisor = (iclk_virtual_root_freq / clock);
2928
		msb_divisor_value = desired_divisor / iclk_pi_range;
3058
		msb_divisor_value = desired_divisor / iclk_pi_range;
2929
		pi_value = desired_divisor % iclk_pi_range;
3059
		pi_value = desired_divisor % iclk_pi_range;
2930
 
3060
 
2931
		auxdiv = 0;
3061
		auxdiv = 0;
2932
		divsel = msb_divisor_value - 2;
3062
		divsel = msb_divisor_value - 2;
2933
		phaseinc = pi_value;
3063
		phaseinc = pi_value;
2934
	}
3064
	}
2935
 
3065
 
2936
	/* This should not happen with any sane values */
3066
	/* This should not happen with any sane values */
2937
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
3067
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
2938
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
3068
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
2939
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
3069
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
2940
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);
3070
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);
2941
 
3071
 
2942
	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
3072
	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
2943
			crtc->mode.clock,
3073
			clock,
2944
			auxdiv,
3074
			auxdiv,
2945
			divsel,
3075
			divsel,
2946
			phasedir,
3076
			phasedir,
2947
			phaseinc);
3077
			phaseinc);
2948
 
3078
 
2949
	/* Program SSCDIVINTPHASE6 */
3079
	/* Program SSCDIVINTPHASE6 */
2950
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3080
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
2951
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
3081
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
2952
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
3082
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
2953
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
3083
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
2954
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
3084
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
2955
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
3085
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
2956
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
3086
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
2957
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
3087
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
2958
 
3088
 
2959
	/* Program SSCAUXDIV */
3089
	/* Program SSCAUXDIV */
2960
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3090
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
2961
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
3091
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
2962
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
3092
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
2963
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
3093
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
2964
 
3094
 
2965
	/* Enable modulator and associated divider */
3095
	/* Enable modulator and associated divider */
2966
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3096
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
2967
	temp &= ~SBI_SSCCTL_DISABLE;
3097
	temp &= ~SBI_SSCCTL_DISABLE;
2968
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3098
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
2969
 
3099
 
2970
	/* Wait for initialization time */
3100
	/* Wait for initialization time */
2971
	udelay(24);
3101
	udelay(24);
2972
 
3102
 
2973
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
3103
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
2974
 
3104
 
2975
	mutex_unlock(&dev_priv->dpio_lock);
3105
	mutex_unlock(&dev_priv->dpio_lock);
2976
}
3106
}
2977
 
3107
 
2978
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
3108
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
2979
						enum pipe pch_transcoder)
3109
						enum pipe pch_transcoder)
2980
{
3110
{
2981
	struct drm_device *dev = crtc->base.dev;
3111
	struct drm_device *dev = crtc->base.dev;
2982
	struct drm_i915_private *dev_priv = dev->dev_private;
3112
	struct drm_i915_private *dev_priv = dev->dev_private;
2983
	enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
3113
	enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
2984
 
3114
 
2985
	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
3115
	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
2986
		   I915_READ(HTOTAL(cpu_transcoder)));
3116
		   I915_READ(HTOTAL(cpu_transcoder)));
2987
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
3117
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
2988
		   I915_READ(HBLANK(cpu_transcoder)));
3118
		   I915_READ(HBLANK(cpu_transcoder)));
2989
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
3119
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
2990
		   I915_READ(HSYNC(cpu_transcoder)));
3120
		   I915_READ(HSYNC(cpu_transcoder)));
2991
 
3121
 
2992
	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
3122
	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
2993
		   I915_READ(VTOTAL(cpu_transcoder)));
3123
		   I915_READ(VTOTAL(cpu_transcoder)));
2994
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
3124
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
2995
		   I915_READ(VBLANK(cpu_transcoder)));
3125
		   I915_READ(VBLANK(cpu_transcoder)));
2996
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
3126
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
2997
		   I915_READ(VSYNC(cpu_transcoder)));
3127
		   I915_READ(VSYNC(cpu_transcoder)));
2998
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
3128
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
2999
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
3129
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
3000
}
3130
}
3001
 
3131
 
3002
static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
3132
static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
3003
{
3133
{
3004
	struct drm_i915_private *dev_priv = dev->dev_private;
3134
	struct drm_i915_private *dev_priv = dev->dev_private;
3005
	uint32_t temp;
3135
	uint32_t temp;
3006
 
3136
 
3007
	temp = I915_READ(SOUTH_CHICKEN1);
3137
	temp = I915_READ(SOUTH_CHICKEN1);
3008
	if (temp & FDI_BC_BIFURCATION_SELECT)
3138
	if (temp & FDI_BC_BIFURCATION_SELECT)
3009
		return;
3139
		return;
3010
 
3140
 
3011
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
3141
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
3012
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
3142
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
3013
 
3143
 
3014
	temp |= FDI_BC_BIFURCATION_SELECT;
3144
	temp |= FDI_BC_BIFURCATION_SELECT;
3015
	DRM_DEBUG_KMS("enabling fdi C rx\n");
3145
	DRM_DEBUG_KMS("enabling fdi C rx\n");
3016
	I915_WRITE(SOUTH_CHICKEN1, temp);
3146
	I915_WRITE(SOUTH_CHICKEN1, temp);
3017
	POSTING_READ(SOUTH_CHICKEN1);
3147
	POSTING_READ(SOUTH_CHICKEN1);
3018
}
3148
}
3019
 
3149
 
3020
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
3150
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
3021
{
3151
{
3022
	struct drm_device *dev = intel_crtc->base.dev;
3152
	struct drm_device *dev = intel_crtc->base.dev;
3023
	struct drm_i915_private *dev_priv = dev->dev_private;
3153
	struct drm_i915_private *dev_priv = dev->dev_private;
3024
 
3154
 
3025
	switch (intel_crtc->pipe) {
3155
	switch (intel_crtc->pipe) {
3026
	case PIPE_A:
3156
	case PIPE_A:
3027
		break;
3157
		break;
3028
	case PIPE_B:
3158
	case PIPE_B:
3029
		if (intel_crtc->config.fdi_lanes > 2)
3159
		if (intel_crtc->config.fdi_lanes > 2)
3030
			WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
3160
			WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
3031
		else
3161
		else
3032
			cpt_enable_fdi_bc_bifurcation(dev);
3162
			cpt_enable_fdi_bc_bifurcation(dev);
3033
 
3163
 
3034
		break;
3164
		break;
3035
	case PIPE_C:
3165
	case PIPE_C:
3036
		cpt_enable_fdi_bc_bifurcation(dev);
3166
		cpt_enable_fdi_bc_bifurcation(dev);
3037
 
3167
 
3038
		break;
3168
		break;
3039
	default:
3169
	default:
3040
		BUG();
3170
		BUG();
3041
	}
3171
	}
3042
}
3172
}
3043
 
3173
 
3044
/*
3174
/*
3045
 * Enable PCH resources required for PCH ports:
3175
 * Enable PCH resources required for PCH ports:
3046
 *   - PCH PLLs
3176
 *   - PCH PLLs
3047
 *   - FDI training & RX/TX
3177
 *   - FDI training & RX/TX
3048
 *   - update transcoder timings
3178
 *   - update transcoder timings
3049
 *   - DP transcoding bits
3179
 *   - DP transcoding bits
3050
 *   - transcoder
3180
 *   - transcoder
3051
 */
3181
 */
3052
static void ironlake_pch_enable(struct drm_crtc *crtc)
3182
static void ironlake_pch_enable(struct drm_crtc *crtc)
3053
{
3183
{
3054
	struct drm_device *dev = crtc->dev;
3184
	struct drm_device *dev = crtc->dev;
3055
	struct drm_i915_private *dev_priv = dev->dev_private;
3185
	struct drm_i915_private *dev_priv = dev->dev_private;
3056
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3186
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3057
	int pipe = intel_crtc->pipe;
3187
	int pipe = intel_crtc->pipe;
3058
	u32 reg, temp;
3188
	u32 reg, temp;
3059
 
3189
 
3060
	assert_pch_transcoder_disabled(dev_priv, pipe);
3190
	assert_pch_transcoder_disabled(dev_priv, pipe);
3061
 
3191
 
3062
	if (IS_IVYBRIDGE(dev))
3192
	if (IS_IVYBRIDGE(dev))
3063
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);
3193
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);
3064
 
3194
 
3065
	/* Write the TU size bits before fdi link training, so that error
3195
	/* Write the TU size bits before fdi link training, so that error
3066
	 * detection works. */
3196
	 * detection works. */
3067
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
3197
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
3068
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
3198
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
3069
 
3199
 
3070
	/* For PCH output, training FDI link */
3200
	/* For PCH output, training FDI link */
3071
	dev_priv->display.fdi_link_train(crtc);
3201
	dev_priv->display.fdi_link_train(crtc);
3072
 
3202
 
3073
	/* We need to program the right clock selection before writing the pixel
3203
	/* We need to program the right clock selection before writing the pixel
3074
	 * mutliplier into the DPLL. */
3204
	 * mutliplier into the DPLL. */
3075
	if (HAS_PCH_CPT(dev)) {
3205
	if (HAS_PCH_CPT(dev)) {
3076
		u32 sel;
3206
		u32 sel;
3077
 
3207
 
3078
		temp = I915_READ(PCH_DPLL_SEL);
3208
		temp = I915_READ(PCH_DPLL_SEL);
3079
		temp |= TRANS_DPLL_ENABLE(pipe);
3209
		temp |= TRANS_DPLL_ENABLE(pipe);
3080
		sel = TRANS_DPLLB_SEL(pipe);
3210
		sel = TRANS_DPLLB_SEL(pipe);
3081
		if (intel_crtc->config.shared_dpll == DPLL_ID_PCH_PLL_B)
3211
		if (intel_crtc->config.shared_dpll == DPLL_ID_PCH_PLL_B)
3082
			temp |= sel;
3212
			temp |= sel;
3083
		else
3213
		else
3084
			temp &= ~sel;
3214
			temp &= ~sel;
3085
		I915_WRITE(PCH_DPLL_SEL, temp);
3215
		I915_WRITE(PCH_DPLL_SEL, temp);
3086
	}
3216
	}
3087
 
3217
 
3088
	/* XXX: pch pll's can be enabled any time before we enable the PCH
3218
	/* XXX: pch pll's can be enabled any time before we enable the PCH
3089
	 * transcoder, and we actually should do this to not upset any PCH
3219
	 * transcoder, and we actually should do this to not upset any PCH
3090
	 * transcoder that already use the clock when we share it.
3220
	 * transcoder that already use the clock when we share it.
3091
	 *
3221
	 *
3092
	 * Note that enable_shared_dpll tries to do the right thing, but
3222
	 * Note that enable_shared_dpll tries to do the right thing, but
3093
	 * get_shared_dpll unconditionally resets the pll - we need that to have
3223
	 * get_shared_dpll unconditionally resets the pll - we need that to have
3094
	 * the right LVDS enable sequence. */
3224
	 * the right LVDS enable sequence. */
3095
	ironlake_enable_shared_dpll(intel_crtc);
3225
	ironlake_enable_shared_dpll(intel_crtc);
3096
 
3226
 
3097
	/* set transcoder timing, panel must allow it */
3227
	/* set transcoder timing, panel must allow it */
3098
	assert_panel_unlocked(dev_priv, pipe);
3228
	assert_panel_unlocked(dev_priv, pipe);
3099
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
3229
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
3100
 
3230
 
3101
	intel_fdi_normal_train(crtc);
3231
	intel_fdi_normal_train(crtc);
3102
 
3232
 
3103
	/* For PCH DP, enable TRANS_DP_CTL */
3233
	/* For PCH DP, enable TRANS_DP_CTL */
3104
	if (HAS_PCH_CPT(dev) &&
3234
	if (HAS_PCH_CPT(dev) &&
3105
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
3235
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
3106
	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
3236
	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
3107
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
3237
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
3108
		reg = TRANS_DP_CTL(pipe);
3238
		reg = TRANS_DP_CTL(pipe);
3109
		temp = I915_READ(reg);
3239
		temp = I915_READ(reg);
3110
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
3240
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
3111
			  TRANS_DP_SYNC_MASK |
3241
			  TRANS_DP_SYNC_MASK |
3112
			  TRANS_DP_BPC_MASK);
3242
			  TRANS_DP_BPC_MASK);
3113
		temp |= (TRANS_DP_OUTPUT_ENABLE |
3243
		temp |= (TRANS_DP_OUTPUT_ENABLE |
3114
			 TRANS_DP_ENH_FRAMING);
3244
			 TRANS_DP_ENH_FRAMING);
3115
		temp |= bpc << 9; /* same format but at 11:9 */
3245
		temp |= bpc << 9; /* same format but at 11:9 */
3116
 
3246
 
3117
		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
3247
		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
3118
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
3248
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
3119
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
3249
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
3120
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
3250
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
3121
 
3251
 
3122
		switch (intel_trans_dp_port_sel(crtc)) {
3252
		switch (intel_trans_dp_port_sel(crtc)) {
3123
		case PCH_DP_B:
3253
		case PCH_DP_B:
3124
			temp |= TRANS_DP_PORT_SEL_B;
3254
			temp |= TRANS_DP_PORT_SEL_B;
3125
			break;
3255
			break;
3126
		case PCH_DP_C:
3256
		case PCH_DP_C:
3127
			temp |= TRANS_DP_PORT_SEL_C;
3257
			temp |= TRANS_DP_PORT_SEL_C;
3128
			break;
3258
			break;
3129
		case PCH_DP_D:
3259
		case PCH_DP_D:
3130
			temp |= TRANS_DP_PORT_SEL_D;
3260
			temp |= TRANS_DP_PORT_SEL_D;
3131
			break;
3261
			break;
3132
		default:
3262
		default:
3133
			BUG();
3263
			BUG();
3134
		}
3264
		}
3135
 
3265
 
3136
		I915_WRITE(reg, temp);
3266
		I915_WRITE(reg, temp);
3137
	}
3267
	}
3138
 
3268
 
3139
	ironlake_enable_pch_transcoder(dev_priv, pipe);
3269
	ironlake_enable_pch_transcoder(dev_priv, pipe);
3140
}
3270
}
3141
 
3271
 
3142
static void lpt_pch_enable(struct drm_crtc *crtc)
3272
static void lpt_pch_enable(struct drm_crtc *crtc)
3143
{
3273
{
3144
	struct drm_device *dev = crtc->dev;
3274
	struct drm_device *dev = crtc->dev;
3145
	struct drm_i915_private *dev_priv = dev->dev_private;
3275
	struct drm_i915_private *dev_priv = dev->dev_private;
3146
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3276
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3147
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
3277
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
3148
 
3278
 
3149
	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
3279
	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
3150
 
3280
 
3151
	lpt_program_iclkip(crtc);
3281
	lpt_program_iclkip(crtc);
3152
 
3282
 
3153
	/* Set transcoder timing. */
3283
	/* Set transcoder timing. */
3154
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
3284
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
3155
 
3285
 
3156
	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
3286
	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
3157
}
3287
}
3158
 
3288
 
3159
static void intel_put_shared_dpll(struct intel_crtc *crtc)
3289
static void intel_put_shared_dpll(struct intel_crtc *crtc)
3160
{
3290
{
3161
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3291
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3162
 
3292
 
3163
	if (pll == NULL)
3293
	if (pll == NULL)
3164
		return;
3294
		return;
3165
 
3295
 
3166
	if (pll->refcount == 0) {
3296
	if (pll->refcount == 0) {
3167
		WARN(1, "bad %s refcount\n", pll->name);
3297
		WARN(1, "bad %s refcount\n", pll->name);
3168
		return;
3298
		return;
3169
	}
3299
	}
3170
 
3300
 
3171
	if (--pll->refcount == 0) {
3301
	if (--pll->refcount == 0) {
3172
		WARN_ON(pll->on);
3302
		WARN_ON(pll->on);
3173
		WARN_ON(pll->active);
3303
		WARN_ON(pll->active);
3174
	}
3304
	}
3175
 
3305
 
3176
	crtc->config.shared_dpll = DPLL_ID_PRIVATE;
3306
	crtc->config.shared_dpll = DPLL_ID_PRIVATE;
3177
}
3307
}
3178
 
3308
 
3179
static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
3309
static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
3180
{
3310
{
3181
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
3311
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
3182
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3312
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3183
	enum intel_dpll_id i;
3313
	enum intel_dpll_id i;
3184
 
3314
 
3185
	if (pll) {
3315
	if (pll) {
3186
		DRM_DEBUG_KMS("CRTC:%d dropping existing %s\n",
3316
		DRM_DEBUG_KMS("CRTC:%d dropping existing %s\n",
3187
			      crtc->base.base.id, pll->name);
3317
			      crtc->base.base.id, pll->name);
3188
		intel_put_shared_dpll(crtc);
3318
		intel_put_shared_dpll(crtc);
3189
	}
3319
	}
3190
 
3320
 
3191
	if (HAS_PCH_IBX(dev_priv->dev)) {
3321
	if (HAS_PCH_IBX(dev_priv->dev)) {
3192
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
3322
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
3193
		i = (enum intel_dpll_id) crtc->pipe;
3323
		i = (enum intel_dpll_id) crtc->pipe;
3194
		pll = &dev_priv->shared_dplls[i];
3324
		pll = &dev_priv->shared_dplls[i];
3195
 
3325
 
3196
		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
3326
		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
3197
			      crtc->base.base.id, pll->name);
3327
			      crtc->base.base.id, pll->name);
3198
 
3328
 
3199
		goto found;
3329
		goto found;
3200
	}
3330
	}
3201
 
3331
 
3202
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3332
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3203
		pll = &dev_priv->shared_dplls[i];
3333
		pll = &dev_priv->shared_dplls[i];
3204
 
3334
 
3205
		/* Only want to check enabled timings first */
3335
		/* Only want to check enabled timings first */
3206
		if (pll->refcount == 0)
3336
		if (pll->refcount == 0)
3207
			continue;
3337
			continue;
3208
 
3338
 
3209
		if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
3339
		if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
3210
			   sizeof(pll->hw_state)) == 0) {
3340
			   sizeof(pll->hw_state)) == 0) {
3211
			DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, ative %d)\n",
3341
			DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, ative %d)\n",
3212
				      crtc->base.base.id,
3342
				      crtc->base.base.id,
3213
				      pll->name, pll->refcount, pll->active);
3343
				      pll->name, pll->refcount, pll->active);
3214
 
3344
 
3215
			goto found;
3345
			goto found;
3216
		}
3346
		}
3217
	}
3347
	}
3218
 
3348
 
3219
	/* Ok no matching timings, maybe there's a free one? */
3349
	/* Ok no matching timings, maybe there's a free one? */
3220
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3350
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3221
		pll = &dev_priv->shared_dplls[i];
3351
		pll = &dev_priv->shared_dplls[i];
3222
		if (pll->refcount == 0) {
3352
		if (pll->refcount == 0) {
3223
			DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
3353
			DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
3224
				      crtc->base.base.id, pll->name);
3354
				      crtc->base.base.id, pll->name);
3225
			goto found;
3355
			goto found;
3226
		}
3356
		}
3227
	}
3357
	}
3228
 
3358
 
3229
	return NULL;
3359
	return NULL;
3230
 
3360
 
3231
found:
3361
found:
3232
	crtc->config.shared_dpll = i;
3362
	crtc->config.shared_dpll = i;
3233
	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
3363
	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
3234
			 pipe_name(crtc->pipe));
3364
			 pipe_name(crtc->pipe));
3235
 
3365
 
3236
	if (pll->active == 0) {
3366
	if (pll->active == 0) {
3237
		memcpy(&pll->hw_state, &crtc->config.dpll_hw_state,
3367
		memcpy(&pll->hw_state, &crtc->config.dpll_hw_state,
3238
		       sizeof(pll->hw_state));
3368
		       sizeof(pll->hw_state));
3239
 
3369
 
3240
		DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
3370
		DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
3241
		WARN_ON(pll->on);
3371
		WARN_ON(pll->on);
3242
		assert_shared_dpll_disabled(dev_priv, pll);
3372
		assert_shared_dpll_disabled(dev_priv, pll);
3243
 
3373
 
3244
		pll->mode_set(dev_priv, pll);
3374
		pll->mode_set(dev_priv, pll);
3245
	}
3375
	}
3246
	pll->refcount++;
3376
	pll->refcount++;
3247
 
3377
 
3248
	return pll;
3378
	return pll;
3249
}
3379
}
3250
 
3380
 
3251
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
3381
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
3252
{
3382
{
3253
	struct drm_i915_private *dev_priv = dev->dev_private;
3383
	struct drm_i915_private *dev_priv = dev->dev_private;
3254
	int dslreg = PIPEDSL(pipe);
3384
	int dslreg = PIPEDSL(pipe);
3255
	u32 temp;
3385
	u32 temp;
3256
 
3386
 
3257
	temp = I915_READ(dslreg);
3387
	temp = I915_READ(dslreg);
3258
	udelay(500);
3388
	udelay(500);
3259
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
3389
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
3260
		if (wait_for(I915_READ(dslreg) != temp, 5))
3390
		if (wait_for(I915_READ(dslreg) != temp, 5))
3261
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
3391
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
3262
	}
3392
	}
3263
}
3393
}
3264
 
3394
 
3265
static void ironlake_pfit_enable(struct intel_crtc *crtc)
3395
static void ironlake_pfit_enable(struct intel_crtc *crtc)
3266
{
3396
{
3267
	struct drm_device *dev = crtc->base.dev;
3397
	struct drm_device *dev = crtc->base.dev;
3268
	struct drm_i915_private *dev_priv = dev->dev_private;
3398
	struct drm_i915_private *dev_priv = dev->dev_private;
3269
	int pipe = crtc->pipe;
3399
	int pipe = crtc->pipe;
3270
 
3400
 
3271
	if (crtc->config.pch_pfit.enabled) {
3401
	if (crtc->config.pch_pfit.enabled) {
3272
		/* Force use of hard-coded filter coefficients
3402
		/* Force use of hard-coded filter coefficients
3273
		 * as some pre-programmed values are broken,
3403
		 * as some pre-programmed values are broken,
3274
		 * e.g. x201.
3404
		 * e.g. x201.
3275
		 */
3405
		 */
3276
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
3406
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
3277
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
3407
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
3278
						 PF_PIPE_SEL_IVB(pipe));
3408
						 PF_PIPE_SEL_IVB(pipe));
3279
		else
3409
		else
3280
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
3410
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
3281
		I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos);
3411
		I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos);
3282
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size);
3412
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size);
3283
	}
3413
	}
3284
}
3414
}
3285
 
3415
 
3286
static void intel_enable_planes(struct drm_crtc *crtc)
3416
static void intel_enable_planes(struct drm_crtc *crtc)
3287
{
3417
{
3288
	struct drm_device *dev = crtc->dev;
3418
	struct drm_device *dev = crtc->dev;
3289
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3419
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3290
	struct intel_plane *intel_plane;
3420
	struct intel_plane *intel_plane;
3291
 
3421
 
3292
	list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head)
3422
	list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head)
3293
		if (intel_plane->pipe == pipe)
3423
		if (intel_plane->pipe == pipe)
3294
			intel_plane_restore(&intel_plane->base);
3424
			intel_plane_restore(&intel_plane->base);
3295
}
3425
}
3296
 
3426
 
3297
static void intel_disable_planes(struct drm_crtc *crtc)
3427
static void intel_disable_planes(struct drm_crtc *crtc)
3298
{
3428
{
3299
	struct drm_device *dev = crtc->dev;
3429
	struct drm_device *dev = crtc->dev;
3300
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3430
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3301
	struct intel_plane *intel_plane;
3431
	struct intel_plane *intel_plane;
3302
 
3432
 
3303
	list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head)
3433
	list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head)
3304
		if (intel_plane->pipe == pipe)
3434
		if (intel_plane->pipe == pipe)
3305
			intel_plane_disable(&intel_plane->base);
3435
			intel_plane_disable(&intel_plane->base);
3306
}
3436
}
-
 
3437
 
-
 
3438
void hsw_enable_ips(struct intel_crtc *crtc)
-
 
3439
{
-
 
3440
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-
 
3441
 
-
 
3442
	if (!crtc->config.ips_enabled)
-
 
3443
		return;
-
 
3444
 
-
 
3445
	/* We can only enable IPS after we enable a plane and wait for a vblank.
-
 
3446
	 * We guarantee that the plane is enabled by calling intel_enable_ips
-
 
3447
	 * only after intel_enable_plane. And intel_enable_plane already waits
-
 
3448
	 * for a vblank, so all we need to do here is to enable the IPS bit. */
-
 
3449
	assert_plane_enabled(dev_priv, crtc->plane);
-
 
3450
	if (IS_BROADWELL(crtc->base.dev)) {
-
 
3451
		mutex_lock(&dev_priv->rps.hw_lock);
-
 
3452
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
-
 
3453
		mutex_unlock(&dev_priv->rps.hw_lock);
-
 
3454
		/* Quoting Art Runyan: "its not safe to expect any particular
-
 
3455
		 * value in IPS_CTL bit 31 after enabling IPS through the
-
 
3456
		 * mailbox." Moreover, the mailbox may return a bogus state,
-
 
3457
		 * so we need to just enable it and continue on.
-
 
3458
		 */
-
 
3459
	} else {
-
 
3460
		I915_WRITE(IPS_CTL, IPS_ENABLE);
-
 
3461
		/* The bit only becomes 1 in the next vblank, so this wait here
-
 
3462
		 * is essentially intel_wait_for_vblank. If we don't have this
-
 
3463
		 * and don't wait for vblanks until the end of crtc_enable, then
-
 
3464
		 * the HW state readout code will complain that the expected
-
 
3465
		 * IPS_CTL value is not the one we read. */
-
 
3466
		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
-
 
3467
			DRM_ERROR("Timed out waiting for IPS enable\n");
-
 
3468
	}
-
 
3469
}
-
 
3470
 
-
 
3471
void hsw_disable_ips(struct intel_crtc *crtc)
-
 
3472
{
-
 
3473
	struct drm_device *dev = crtc->base.dev;
-
 
3474
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
3475
 
-
 
3476
	if (!crtc->config.ips_enabled)
-
 
3477
		return;
-
 
3478
 
-
 
3479
	assert_plane_enabled(dev_priv, crtc->plane);
-
 
3480
	if (IS_BROADWELL(crtc->base.dev)) {
-
 
3481
		mutex_lock(&dev_priv->rps.hw_lock);
-
 
3482
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
-
 
3483
		mutex_unlock(&dev_priv->rps.hw_lock);
-
 
3484
	} else {
-
 
3485
		I915_WRITE(IPS_CTL, 0);
-
 
3486
		POSTING_READ(IPS_CTL);
-
 
3487
	}
-
 
3488
 
-
 
3489
	/* We need to wait for a vblank before we can disable the plane. */
-
 
3490
	intel_wait_for_vblank(dev, crtc->pipe);
-
 
3491
}
-
 
3492
 
-
 
3493
/** Loads the palette/gamma unit for the CRTC with the prepared values */
-
 
3494
static void intel_crtc_load_lut(struct drm_crtc *crtc)
-
 
3495
{
-
 
3496
	struct drm_device *dev = crtc->dev;
-
 
3497
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
3498
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
 
3499
	enum pipe pipe = intel_crtc->pipe;
-
 
3500
	int palreg = PALETTE(pipe);
-
 
3501
	int i;
-
 
3502
	bool reenable_ips = false;
-
 
3503
 
-
 
3504
	/* The clocks have to be on to load the palette. */
-
 
3505
	if (!crtc->enabled || !intel_crtc->active)
-
 
3506
		return;
-
 
3507
 
-
 
3508
	if (!HAS_PCH_SPLIT(dev_priv->dev)) {
-
 
3509
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
-
 
3510
			assert_dsi_pll_enabled(dev_priv);
-
 
3511
		else
-
 
3512
			assert_pll_enabled(dev_priv, pipe);
-
 
3513
	}
-
 
3514
 
-
 
3515
	/* use legacy palette for Ironlake */
-
 
3516
	if (HAS_PCH_SPLIT(dev))
-
 
3517
		palreg = LGC_PALETTE(pipe);
-
 
3518
 
-
 
3519
	/* Workaround : Do not read or write the pipe palette/gamma data while
-
 
3520
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
-
 
3521
	 */
-
 
3522
	if (IS_HASWELL(dev) && intel_crtc->config.ips_enabled &&
-
 
3523
	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
-
 
3524
	     GAMMA_MODE_MODE_SPLIT)) {
-
 
3525
		hsw_disable_ips(intel_crtc);
-
 
3526
		reenable_ips = true;
-
 
3527
	}
-
 
3528
 
-
 
3529
	for (i = 0; i < 256; i++) {
-
 
3530
		I915_WRITE(palreg + 4 * i,
-
 
3531
			   (intel_crtc->lut_r[i] << 16) |
-
 
3532
			   (intel_crtc->lut_g[i] << 8) |
-
 
3533
			   intel_crtc->lut_b[i]);
-
 
3534
	}
-
 
3535
 
-
 
3536
	if (reenable_ips)
-
 
3537
		hsw_enable_ips(intel_crtc);
-
 
3538
}
3307
 
3539
 
3308
static void ironlake_crtc_enable(struct drm_crtc *crtc)
3540
static void ironlake_crtc_enable(struct drm_crtc *crtc)
3309
{
3541
{
3310
    struct drm_device *dev = crtc->dev;
3542
    struct drm_device *dev = crtc->dev;
3311
    struct drm_i915_private *dev_priv = dev->dev_private;
3543
    struct drm_i915_private *dev_priv = dev->dev_private;
3312
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3544
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3313
	struct intel_encoder *encoder;
3545
	struct intel_encoder *encoder;
3314
    int pipe = intel_crtc->pipe;
3546
    int pipe = intel_crtc->pipe;
3315
    int plane = intel_crtc->plane;
3547
    int plane = intel_crtc->plane;
3316
 
3548
 
3317
	WARN_ON(!crtc->enabled);
3549
	WARN_ON(!crtc->enabled);
3318
 
3550
 
3319
    if (intel_crtc->active)
3551
    if (intel_crtc->active)
3320
        return;
3552
        return;
3321
 
3553
 
3322
    intel_crtc->active = true;
3554
    intel_crtc->active = true;
3323
 
3555
 
3324
	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
3556
	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
3325
	intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
3557
	intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
3326
 
-
 
3327
    intel_update_watermarks(dev);
-
 
3328
 
3558
 
3329
	for_each_encoder_on_crtc(dev, crtc, encoder)
3559
	for_each_encoder_on_crtc(dev, crtc, encoder)
3330
		if (encoder->pre_enable)
3560
		if (encoder->pre_enable)
3331
			encoder->pre_enable(encoder);
3561
			encoder->pre_enable(encoder);
3332
 
3562
 
3333
	if (intel_crtc->config.has_pch_encoder) {
3563
	if (intel_crtc->config.has_pch_encoder) {
3334
		/* Note: FDI PLL enabling _must_ be done before we enable the
3564
		/* Note: FDI PLL enabling _must_ be done before we enable the
3335
		 * cpu pipes, hence this is separate from all the other fdi/pch
3565
		 * cpu pipes, hence this is separate from all the other fdi/pch
3336
		 * enabling. */
3566
		 * enabling. */
3337
		ironlake_fdi_pll_enable(intel_crtc);
3567
		ironlake_fdi_pll_enable(intel_crtc);
3338
	} else {
3568
	} else {
3339
		assert_fdi_tx_disabled(dev_priv, pipe);
3569
		assert_fdi_tx_disabled(dev_priv, pipe);
3340
		assert_fdi_rx_disabled(dev_priv, pipe);
3570
		assert_fdi_rx_disabled(dev_priv, pipe);
3341
	}
3571
	}
3342
 
3572
 
3343
	ironlake_pfit_enable(intel_crtc);
3573
	ironlake_pfit_enable(intel_crtc);
3344
 
3574
 
3345
    /*
3575
    /*
3346
     * On ILK+ LUT must be loaded before the pipe is running but with
3576
     * On ILK+ LUT must be loaded before the pipe is running but with
3347
     * clocks enabled
3577
     * clocks enabled
3348
     */
3578
     */
3349
    intel_crtc_load_lut(crtc);
3579
    intel_crtc_load_lut(crtc);
-
 
3580
 
3350
 
3581
	intel_update_watermarks(crtc);
3351
	intel_enable_pipe(dev_priv, pipe,
3582
	intel_enable_pipe(dev_priv, pipe,
3352
			  intel_crtc->config.has_pch_encoder);
3583
			  intel_crtc->config.has_pch_encoder, false);
3353
    intel_enable_plane(dev_priv, plane, pipe);
3584
	intel_enable_primary_plane(dev_priv, plane, pipe);
3354
	intel_enable_planes(crtc);
3585
	intel_enable_planes(crtc);
3355
	intel_crtc_update_cursor(crtc, true);
3586
	intel_crtc_update_cursor(crtc, true);
3356
 
3587
 
3357
	if (intel_crtc->config.has_pch_encoder)
3588
	if (intel_crtc->config.has_pch_encoder)
3358
        ironlake_pch_enable(crtc);
3589
        ironlake_pch_enable(crtc);
3359
 
3590
 
3360
    mutex_lock(&dev->struct_mutex);
3591
    mutex_lock(&dev->struct_mutex);
3361
    intel_update_fbc(dev);
3592
    intel_update_fbc(dev);
3362
    mutex_unlock(&dev->struct_mutex);
3593
    mutex_unlock(&dev->struct_mutex);
3363
 
3594
 
3364
	for_each_encoder_on_crtc(dev, crtc, encoder)
3595
	for_each_encoder_on_crtc(dev, crtc, encoder)
3365
		encoder->enable(encoder);
3596
		encoder->enable(encoder);
3366
 
3597
 
3367
	if (HAS_PCH_CPT(dev))
3598
	if (HAS_PCH_CPT(dev))
3368
		cpt_verify_modeset(dev, intel_crtc->pipe);
3599
		cpt_verify_modeset(dev, intel_crtc->pipe);
3369
 
3600
 
3370
	/*
3601
	/*
3371
	 * There seems to be a race in PCH platform hw (at least on some
3602
	 * There seems to be a race in PCH platform hw (at least on some
3372
	 * outputs) where an enabled pipe still completes any pageflip right
3603
	 * outputs) where an enabled pipe still completes any pageflip right
3373
	 * away (as if the pipe is off) instead of waiting for vblank. As soon
3604
	 * away (as if the pipe is off) instead of waiting for vblank. As soon
3374
	 * as the first vblank happend, everything works as expected. Hence just
3605
	 * as the first vblank happend, everything works as expected. Hence just
3375
	 * wait for one vblank before returning to avoid strange things
3606
	 * wait for one vblank before returning to avoid strange things
3376
	 * happening.
3607
	 * happening.
3377
	 */
3608
	 */
3378
	intel_wait_for_vblank(dev, intel_crtc->pipe);
3609
	intel_wait_for_vblank(dev, intel_crtc->pipe);
3379
}
3610
}
3380
 
3611
 
3381
/* IPS only exists on ULT machines and is tied to pipe A. */
3612
/* IPS only exists on ULT machines and is tied to pipe A. */
3382
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
3613
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
3383
{
3614
{
3384
	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
3615
	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
3385
}
3616
}
3386
 
3617
 
3387
static void hsw_enable_ips(struct intel_crtc *crtc)
3618
static void haswell_crtc_enable_planes(struct drm_crtc *crtc)
-
 
3619
{
3388
{
3620
	struct drm_device *dev = crtc->dev;
-
 
3621
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
3622
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
 
3623
	int pipe = intel_crtc->pipe;
-
 
3624
	int plane = intel_crtc->plane;
3389
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
3625
 
3390
 
3626
	intel_enable_primary_plane(dev_priv, plane, pipe);
3391
	if (!crtc->config.ips_enabled)
-
 
3392
		return;
-
 
3393
 
3627
	intel_enable_planes(crtc);
-
 
3628
	intel_crtc_update_cursor(crtc, true);
3394
	/* We can only enable IPS after we enable a plane and wait for a vblank.
3629
 
3395
	 * We guarantee that the plane is enabled by calling intel_enable_ips
3630
	hsw_enable_ips(intel_crtc);
3396
	 * only after intel_enable_plane. And intel_enable_plane already waits
3631
 
3397
	 * for a vblank, so all we need to do here is to enable the IPS bit. */
3632
	mutex_lock(&dev->struct_mutex);
3398
	assert_plane_enabled(dev_priv, crtc->plane);
3633
	intel_update_fbc(dev);
3399
	I915_WRITE(IPS_CTL, IPS_ENABLE);
3634
	mutex_unlock(&dev->struct_mutex);
3400
}
3635
}
3401
 
3636
 
-
 
3637
static void haswell_crtc_disable_planes(struct drm_crtc *crtc)
-
 
3638
{
-
 
3639
	struct drm_device *dev = crtc->dev;
-
 
3640
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
3641
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
 
3642
	int pipe = intel_crtc->pipe;
-
 
3643
	int plane = intel_crtc->plane;
3402
static void hsw_disable_ips(struct intel_crtc *crtc)
3644
 
-
 
3645
//   intel_crtc_wait_for_pending_flips(crtc);
-
 
3646
//   drm_vblank_off(dev, pipe);
-
 
3647
 
-
 
3648
	/* FBC must be disabled before disabling the plane on HSW. */
-
 
3649
	if (dev_priv->fbc.plane == plane)
-
 
3650
		intel_disable_fbc(dev);
-
 
3651
 
-
 
3652
	hsw_disable_ips(intel_crtc);
-
 
3653
 
-
 
3654
	intel_crtc_update_cursor(crtc, false);
-
 
3655
	intel_disable_planes(crtc);
-
 
3656
	intel_disable_primary_plane(dev_priv, plane, pipe);
-
 
3657
}
-
 
3658
 
-
 
3659
/*
-
 
3660
 * This implements the workaround described in the "notes" section of the mode
-
 
3661
 * set sequence documentation. When going from no pipes or single pipe to
-
 
3662
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
-
 
3663
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
-
 
3664
 */
-
 
3665
static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
-
 
3666
{
-
 
3667
	struct drm_device *dev = crtc->base.dev;
-
 
3668
	struct intel_crtc *crtc_it, *other_active_crtc = NULL;
-
 
3669
 
-
 
3670
	/* We want to get the other_active_crtc only if there's only 1 other
-
 
3671
	 * active crtc. */
3403
{
3672
	list_for_each_entry(crtc_it, &dev->mode_config.crtc_list, base.head) {
3404
	struct drm_device *dev = crtc->base.dev;
3673
		if (!crtc_it->active || crtc_it == crtc)
-
 
3674
			continue;
3405
	struct drm_i915_private *dev_priv = dev->dev_private;
3675
 
-
 
3676
		if (other_active_crtc)
3406
 
3677
		return;
3407
	if (!crtc->config.ips_enabled)
3678
 
3408
		return;
3679
		other_active_crtc = crtc_it;
3409
 
3680
	}
3410
	assert_plane_enabled(dev_priv, crtc->plane);
3681
	if (!other_active_crtc)
3411
	I915_WRITE(IPS_CTL, 0);
3682
		return;
3412
 
3683
 
3413
	/* We need to wait for a vblank before we can disable the plane. */
3684
	intel_wait_for_vblank(dev, other_active_crtc->pipe);
3414
	intel_wait_for_vblank(dev, crtc->pipe);
3685
	intel_wait_for_vblank(dev, other_active_crtc->pipe);
3415
}
3686
}
3416
 
3687
 
3417
static void haswell_crtc_enable(struct drm_crtc *crtc)
3688
static void haswell_crtc_enable(struct drm_crtc *crtc)
3418
{
3689
{
3419
	struct drm_device *dev = crtc->dev;
3690
	struct drm_device *dev = crtc->dev;
3420
	struct drm_i915_private *dev_priv = dev->dev_private;
3691
	struct drm_i915_private *dev_priv = dev->dev_private;
3421
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3692
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3422
	struct intel_encoder *encoder;
3693
	struct intel_encoder *encoder;
3423
	int pipe = intel_crtc->pipe;
3694
	int pipe = intel_crtc->pipe;
3424
	int plane = intel_crtc->plane;
-
 
3425
 
3695
 
3426
	WARN_ON(!crtc->enabled);
3696
	WARN_ON(!crtc->enabled);
3427
 
3697
 
3428
	if (intel_crtc->active)
3698
	if (intel_crtc->active)
3429
		return;
3699
		return;
3430
 
3700
 
3431
	intel_crtc->active = true;
3701
	intel_crtc->active = true;
3432
 
3702
 
3433
	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
3703
	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
3434
	if (intel_crtc->config.has_pch_encoder)
3704
	if (intel_crtc->config.has_pch_encoder)
3435
		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
3705
		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
3436
 
-
 
3437
	intel_update_watermarks(dev);
-
 
3438
 
3706
 
3439
	if (intel_crtc->config.has_pch_encoder)
3707
	if (intel_crtc->config.has_pch_encoder)
3440
		dev_priv->display.fdi_link_train(crtc);
3708
		dev_priv->display.fdi_link_train(crtc);
3441
 
3709
 
3442
	for_each_encoder_on_crtc(dev, crtc, encoder)
3710
	for_each_encoder_on_crtc(dev, crtc, encoder)
3443
		if (encoder->pre_enable)
3711
		if (encoder->pre_enable)
3444
			encoder->pre_enable(encoder);
3712
			encoder->pre_enable(encoder);
3445
 
3713
 
3446
	intel_ddi_enable_pipe_clock(intel_crtc);
3714
	intel_ddi_enable_pipe_clock(intel_crtc);
3447
 
3715
 
3448
	ironlake_pfit_enable(intel_crtc);
3716
	ironlake_pfit_enable(intel_crtc);
3449
 
3717
 
3450
	/*
3718
	/*
3451
	 * On ILK+ LUT must be loaded before the pipe is running but with
3719
	 * On ILK+ LUT must be loaded before the pipe is running but with
3452
	 * clocks enabled
3720
	 * clocks enabled
3453
	 */
3721
	 */
3454
	intel_crtc_load_lut(crtc);
3722
	intel_crtc_load_lut(crtc);
3455
 
3723
 
3456
	intel_ddi_set_pipe_settings(crtc);
3724
	intel_ddi_set_pipe_settings(crtc);
3457
	intel_ddi_enable_transcoder_func(crtc);
3725
	intel_ddi_enable_transcoder_func(crtc);
-
 
3726
 
3458
 
3727
	intel_update_watermarks(crtc);
3459
	intel_enable_pipe(dev_priv, pipe,
3728
	intel_enable_pipe(dev_priv, pipe,
3460
			  intel_crtc->config.has_pch_encoder);
-
 
3461
	intel_enable_plane(dev_priv, plane, pipe);
-
 
3462
	intel_enable_planes(crtc);
-
 
3463
	intel_crtc_update_cursor(crtc, true);
-
 
3464
 
-
 
3465
	hsw_enable_ips(intel_crtc);
3729
			  intel_crtc->config.has_pch_encoder, false);
3466
 
3730
 
3467
	if (intel_crtc->config.has_pch_encoder)
3731
	if (intel_crtc->config.has_pch_encoder)
3468
		lpt_pch_enable(crtc);
3732
		lpt_pch_enable(crtc);
3469
 
-
 
3470
	mutex_lock(&dev->struct_mutex);
-
 
3471
	intel_update_fbc(dev);
-
 
3472
	mutex_unlock(&dev->struct_mutex);
-
 
3473
 
3733
 
3474
	for_each_encoder_on_crtc(dev, crtc, encoder)
3734
	for_each_encoder_on_crtc(dev, crtc, encoder) {
-
 
3735
		encoder->enable(encoder);
-
 
3736
		intel_opregion_notify_encoder(encoder, true);
-
 
3737
	}
-
 
3738
 
-
 
3739
	/* If we change the relative order between pipe/planes enabling, we need
-
 
3740
	 * to change the workaround. */
-
 
3741
	haswell_mode_set_planes_workaround(intel_crtc);
3475
		encoder->enable(encoder);
3742
	haswell_crtc_enable_planes(crtc);
3476
 
3743
 
3477
	/*
3744
	/*
3478
	 * There seems to be a race in PCH platform hw (at least on some
3745
	 * There seems to be a race in PCH platform hw (at least on some
3479
	 * outputs) where an enabled pipe still completes any pageflip right
3746
	 * outputs) where an enabled pipe still completes any pageflip right
3480
	 * away (as if the pipe is off) instead of waiting for vblank. As soon
3747
	 * away (as if the pipe is off) instead of waiting for vblank. As soon
3481
	 * as the first vblank happend, everything works as expected. Hence just
3748
	 * as the first vblank happend, everything works as expected. Hence just
3482
	 * wait for one vblank before returning to avoid strange things
3749
	 * wait for one vblank before returning to avoid strange things
3483
	 * happening.
3750
	 * happening.
3484
	 */
3751
	 */
3485
	intel_wait_for_vblank(dev, intel_crtc->pipe);
3752
	intel_wait_for_vblank(dev, intel_crtc->pipe);
3486
}
3753
}
3487
 
3754
 
3488
static void ironlake_pfit_disable(struct intel_crtc *crtc)
3755
static void ironlake_pfit_disable(struct intel_crtc *crtc)
3489
{
3756
{
3490
	struct drm_device *dev = crtc->base.dev;
3757
	struct drm_device *dev = crtc->base.dev;
3491
	struct drm_i915_private *dev_priv = dev->dev_private;
3758
	struct drm_i915_private *dev_priv = dev->dev_private;
3492
	int pipe = crtc->pipe;
3759
	int pipe = crtc->pipe;
3493
 
3760
 
3494
	/* To avoid upsetting the power well on haswell only disable the pfit if
3761
	/* To avoid upsetting the power well on haswell only disable the pfit if
3495
	 * it's in use. The hw state code will make sure we get this right. */
3762
	 * it's in use. The hw state code will make sure we get this right. */
3496
	if (crtc->config.pch_pfit.enabled) {
3763
	if (crtc->config.pch_pfit.enabled) {
3497
		I915_WRITE(PF_CTL(pipe), 0);
3764
		I915_WRITE(PF_CTL(pipe), 0);
3498
		I915_WRITE(PF_WIN_POS(pipe), 0);
3765
		I915_WRITE(PF_WIN_POS(pipe), 0);
3499
		I915_WRITE(PF_WIN_SZ(pipe), 0);
3766
		I915_WRITE(PF_WIN_SZ(pipe), 0);
3500
	}
3767
	}
3501
}
3768
}
3502
 
3769
 
3503
static void ironlake_crtc_disable(struct drm_crtc *crtc)
3770
static void ironlake_crtc_disable(struct drm_crtc *crtc)
3504
{
3771
{
3505
    struct drm_device *dev = crtc->dev;
3772
    struct drm_device *dev = crtc->dev;
3506
    struct drm_i915_private *dev_priv = dev->dev_private;
3773
    struct drm_i915_private *dev_priv = dev->dev_private;
3507
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3774
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3508
	struct intel_encoder *encoder;
3775
	struct intel_encoder *encoder;
3509
    int pipe = intel_crtc->pipe;
3776
    int pipe = intel_crtc->pipe;
3510
    int plane = intel_crtc->plane;
3777
    int plane = intel_crtc->plane;
3511
    u32 reg, temp;
3778
    u32 reg, temp;
3512
 
3779
 
3513
 
3780
 
3514
    if (!intel_crtc->active)
3781
    if (!intel_crtc->active)
3515
        return;
3782
        return;
3516
 
3783
 
3517
	for_each_encoder_on_crtc(dev, crtc, encoder)
3784
	for_each_encoder_on_crtc(dev, crtc, encoder)
3518
		encoder->disable(encoder);
3785
		encoder->disable(encoder);
3519
 
3786
 
3520
//    intel_crtc_wait_for_pending_flips(crtc);
3787
//    intel_crtc_wait_for_pending_flips(crtc);
3521
//    drm_vblank_off(dev, pipe);
3788
//    drm_vblank_off(dev, pipe);
3522
 
3789
 
3523
	if (dev_priv->fbc.plane == plane)
3790
	if (dev_priv->fbc.plane == plane)
3524
		intel_disable_fbc(dev);
3791
		intel_disable_fbc(dev);
3525
 
3792
 
3526
	intel_crtc_update_cursor(crtc, false);
3793
	intel_crtc_update_cursor(crtc, false);
3527
	intel_disable_planes(crtc);
3794
	intel_disable_planes(crtc);
3528
    intel_disable_plane(dev_priv, plane, pipe);
3795
	intel_disable_primary_plane(dev_priv, plane, pipe);
3529
 
3796
 
3530
	if (intel_crtc->config.has_pch_encoder)
3797
	if (intel_crtc->config.has_pch_encoder)
3531
		intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
3798
		intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
3532
 
3799
 
3533
    intel_disable_pipe(dev_priv, pipe);
3800
    intel_disable_pipe(dev_priv, pipe);
3534
 
3801
 
3535
	ironlake_pfit_disable(intel_crtc);
3802
	ironlake_pfit_disable(intel_crtc);
3536
 
3803
 
3537
	for_each_encoder_on_crtc(dev, crtc, encoder)
3804
	for_each_encoder_on_crtc(dev, crtc, encoder)
3538
		if (encoder->post_disable)
3805
		if (encoder->post_disable)
3539
			encoder->post_disable(encoder);
3806
			encoder->post_disable(encoder);
3540
 
3807
 
3541
	if (intel_crtc->config.has_pch_encoder) {
3808
	if (intel_crtc->config.has_pch_encoder) {
3542
    ironlake_fdi_disable(crtc);
3809
    ironlake_fdi_disable(crtc);
3543
 
3810
 
3544
	ironlake_disable_pch_transcoder(dev_priv, pipe);
3811
	ironlake_disable_pch_transcoder(dev_priv, pipe);
3545
		intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
3812
		intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
3546
 
3813
 
3547
    if (HAS_PCH_CPT(dev)) {
3814
    if (HAS_PCH_CPT(dev)) {
3548
        /* disable TRANS_DP_CTL */
3815
        /* disable TRANS_DP_CTL */
3549
        reg = TRANS_DP_CTL(pipe);
3816
        reg = TRANS_DP_CTL(pipe);
3550
        temp = I915_READ(reg);
3817
        temp = I915_READ(reg);
3551
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
3818
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
3552
				  TRANS_DP_PORT_SEL_MASK);
3819
				  TRANS_DP_PORT_SEL_MASK);
3553
        temp |= TRANS_DP_PORT_SEL_NONE;
3820
        temp |= TRANS_DP_PORT_SEL_NONE;
3554
        I915_WRITE(reg, temp);
3821
        I915_WRITE(reg, temp);
3555
 
3822
 
3556
        /* disable DPLL_SEL */
3823
        /* disable DPLL_SEL */
3557
        temp = I915_READ(PCH_DPLL_SEL);
3824
        temp = I915_READ(PCH_DPLL_SEL);
3558
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
3825
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
3559
        I915_WRITE(PCH_DPLL_SEL, temp);
3826
        I915_WRITE(PCH_DPLL_SEL, temp);
3560
    }
3827
    }
3561
 
3828
 
3562
    /* disable PCH DPLL */
3829
    /* disable PCH DPLL */
3563
		intel_disable_shared_dpll(intel_crtc);
3830
		intel_disable_shared_dpll(intel_crtc);
3564
 
3831
 
3565
	ironlake_fdi_pll_disable(intel_crtc);
3832
	ironlake_fdi_pll_disable(intel_crtc);
3566
	}
3833
	}
3567
 
3834
 
3568
    intel_crtc->active = false;
3835
    intel_crtc->active = false;
3569
    intel_update_watermarks(dev);
3836
	intel_update_watermarks(crtc);
3570
 
3837
 
3571
    mutex_lock(&dev->struct_mutex);
3838
    mutex_lock(&dev->struct_mutex);
3572
    intel_update_fbc(dev);
3839
    intel_update_fbc(dev);
3573
    mutex_unlock(&dev->struct_mutex);
3840
    mutex_unlock(&dev->struct_mutex);
3574
}
3841
}
3575
 
3842
 
3576
static void haswell_crtc_disable(struct drm_crtc *crtc)
3843
static void haswell_crtc_disable(struct drm_crtc *crtc)
3577
{
3844
{
3578
	struct drm_device *dev = crtc->dev;
3845
	struct drm_device *dev = crtc->dev;
3579
	struct drm_i915_private *dev_priv = dev->dev_private;
3846
	struct drm_i915_private *dev_priv = dev->dev_private;
3580
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3847
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3581
	struct intel_encoder *encoder;
3848
	struct intel_encoder *encoder;
3582
	int pipe = intel_crtc->pipe;
3849
	int pipe = intel_crtc->pipe;
3583
	int plane = intel_crtc->plane;
-
 
3584
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
3850
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
3585
 
3851
 
3586
	if (!intel_crtc->active)
3852
	if (!intel_crtc->active)
3587
		return;
3853
		return;
3588
 
-
 
3589
	for_each_encoder_on_crtc(dev, crtc, encoder)
-
 
3590
		encoder->disable(encoder);
-
 
3591
 
-
 
3592
 
-
 
3593
	/* FBC must be disabled before disabling the plane on HSW. */
-
 
3594
	if (dev_priv->fbc.plane == plane)
3854
 
3595
		intel_disable_fbc(dev);
3855
	haswell_crtc_disable_planes(crtc);
3596
 
-
 
3597
	hsw_disable_ips(intel_crtc);
3856
 
3598
 
3857
	for_each_encoder_on_crtc(dev, crtc, encoder) {
3599
	intel_crtc_update_cursor(crtc, false);
-
 
-
 
3858
		intel_opregion_notify_encoder(encoder, false);
3600
	intel_disable_planes(crtc);
3859
		encoder->disable(encoder);
3601
	intel_disable_plane(dev_priv, plane, pipe);
3860
	}
3602
 
3861
 
3603
	if (intel_crtc->config.has_pch_encoder)
3862
	if (intel_crtc->config.has_pch_encoder)
3604
		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
3863
		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
3605
	intel_disable_pipe(dev_priv, pipe);
3864
	intel_disable_pipe(dev_priv, pipe);
3606
 
3865
 
3607
	intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
3866
	intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
3608
 
3867
 
3609
	ironlake_pfit_disable(intel_crtc);
3868
	ironlake_pfit_disable(intel_crtc);
3610
 
3869
 
3611
	intel_ddi_disable_pipe_clock(intel_crtc);
3870
	intel_ddi_disable_pipe_clock(intel_crtc);
3612
 
3871
 
3613
	for_each_encoder_on_crtc(dev, crtc, encoder)
3872
	for_each_encoder_on_crtc(dev, crtc, encoder)
3614
		if (encoder->post_disable)
3873
		if (encoder->post_disable)
3615
			encoder->post_disable(encoder);
3874
			encoder->post_disable(encoder);
3616
 
3875
 
3617
	if (intel_crtc->config.has_pch_encoder) {
3876
	if (intel_crtc->config.has_pch_encoder) {
3618
		lpt_disable_pch_transcoder(dev_priv);
3877
		lpt_disable_pch_transcoder(dev_priv);
3619
		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
3878
		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
3620
		intel_ddi_fdi_disable(crtc);
3879
		intel_ddi_fdi_disable(crtc);
3621
	}
3880
	}
3622
 
3881
 
3623
	intel_crtc->active = false;
3882
	intel_crtc->active = false;
3624
	intel_update_watermarks(dev);
3883
	intel_update_watermarks(crtc);
3625
 
3884
 
3626
	mutex_lock(&dev->struct_mutex);
3885
	mutex_lock(&dev->struct_mutex);
3627
	intel_update_fbc(dev);
3886
	intel_update_fbc(dev);
3628
	mutex_unlock(&dev->struct_mutex);
3887
	mutex_unlock(&dev->struct_mutex);
3629
}
3888
}
3630
 
3889
 
3631
static void ironlake_crtc_off(struct drm_crtc *crtc)
3890
static void ironlake_crtc_off(struct drm_crtc *crtc)
3632
{
3891
{
3633
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3892
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3634
	intel_put_shared_dpll(intel_crtc);
3893
	intel_put_shared_dpll(intel_crtc);
3635
}
3894
}
3636
 
3895
 
3637
static void haswell_crtc_off(struct drm_crtc *crtc)
3896
static void haswell_crtc_off(struct drm_crtc *crtc)
3638
{
3897
{
3639
	intel_ddi_put_crtc_pll(crtc);
3898
	intel_ddi_put_crtc_pll(crtc);
3640
}
3899
}
3641
 
3900
 
3642
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3901
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3643
{
3902
{
3644
	if (!enable && intel_crtc->overlay) {
3903
	if (!enable && intel_crtc->overlay) {
3645
		struct drm_device *dev = intel_crtc->base.dev;
3904
		struct drm_device *dev = intel_crtc->base.dev;
3646
		struct drm_i915_private *dev_priv = dev->dev_private;
3905
		struct drm_i915_private *dev_priv = dev->dev_private;
3647
 
3906
 
3648
		mutex_lock(&dev->struct_mutex);
3907
		mutex_lock(&dev->struct_mutex);
3649
		dev_priv->mm.interruptible = false;
3908
		dev_priv->mm.interruptible = false;
3650
//       (void) intel_overlay_switch_off(intel_crtc->overlay);
3909
//       (void) intel_overlay_switch_off(intel_crtc->overlay);
3651
		dev_priv->mm.interruptible = true;
3910
		dev_priv->mm.interruptible = true;
3652
		mutex_unlock(&dev->struct_mutex);
3911
		mutex_unlock(&dev->struct_mutex);
3653
	}
3912
	}
3654
 
3913
 
3655
	/* Let userspace switch the overlay on again. In most cases userspace
3914
	/* Let userspace switch the overlay on again. In most cases userspace
3656
	 * has to recompute where to put it anyway.
3915
	 * has to recompute where to put it anyway.
3657
	 */
3916
	 */
3658
}
3917
}
3659
 
3918
 
3660
/**
3919
/**
3661
 * i9xx_fixup_plane - ugly workaround for G45 to fire up the hardware
3920
 * i9xx_fixup_plane - ugly workaround for G45 to fire up the hardware
3662
 * cursor plane briefly if not already running after enabling the display
3921
 * cursor plane briefly if not already running after enabling the display
3663
 * plane.
3922
 * plane.
3664
 * This workaround avoids occasional blank screens when self refresh is
3923
 * This workaround avoids occasional blank screens when self refresh is
3665
 * enabled.
3924
 * enabled.
3666
 */
3925
 */
3667
static void
3926
static void
3668
g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
3927
g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
3669
{
3928
{
3670
	u32 cntl = I915_READ(CURCNTR(pipe));
3929
	u32 cntl = I915_READ(CURCNTR(pipe));
3671
 
3930
 
3672
	if ((cntl & CURSOR_MODE) == 0) {
3931
	if ((cntl & CURSOR_MODE) == 0) {
3673
		u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
3932
		u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
3674
 
3933
 
3675
		I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
3934
		I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
3676
		I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
3935
		I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
3677
		intel_wait_for_vblank(dev_priv->dev, pipe);
3936
		intel_wait_for_vblank(dev_priv->dev, pipe);
3678
		I915_WRITE(CURCNTR(pipe), cntl);
3937
		I915_WRITE(CURCNTR(pipe), cntl);
3679
		I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
3938
		I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
3680
		I915_WRITE(FW_BLC_SELF, fw_bcl_self);
3939
		I915_WRITE(FW_BLC_SELF, fw_bcl_self);
3681
	}
3940
	}
3682
}
3941
}
3683
 
3942
 
3684
static void i9xx_pfit_enable(struct intel_crtc *crtc)
3943
static void i9xx_pfit_enable(struct intel_crtc *crtc)
3685
{
3944
{
3686
	struct drm_device *dev = crtc->base.dev;
3945
	struct drm_device *dev = crtc->base.dev;
3687
	struct drm_i915_private *dev_priv = dev->dev_private;
3946
	struct drm_i915_private *dev_priv = dev->dev_private;
3688
	struct intel_crtc_config *pipe_config = &crtc->config;
3947
	struct intel_crtc_config *pipe_config = &crtc->config;
3689
 
3948
 
3690
	if (!crtc->config.gmch_pfit.control)
3949
	if (!crtc->config.gmch_pfit.control)
3691
		return;
3950
		return;
3692
 
3951
 
3693
	/*
3952
	/*
3694
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
3953
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
3695
	 * according to register description and PRM.
3954
	 * according to register description and PRM.
3696
	 */
3955
	 */
3697
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
3956
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
3698
	assert_pipe_disabled(dev_priv, crtc->pipe);
3957
	assert_pipe_disabled(dev_priv, crtc->pipe);
3699
 
3958
 
3700
	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
3959
	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
3701
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
3960
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
3702
 
3961
 
3703
	/* Border color in case we don't scale up to the full screen. Black by
3962
	/* Border color in case we don't scale up to the full screen. Black by
3704
	 * default, change to something else for debugging. */
3963
	 * default, change to something else for debugging. */
3705
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
3964
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
3706
}
3965
}
-
 
3966
 
-
 
3967
int valleyview_get_vco(struct drm_i915_private *dev_priv)
-
 
3968
{
-
 
3969
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
-
 
3970
 
-
 
3971
	/* Obtain SKU information */
-
 
3972
	mutex_lock(&dev_priv->dpio_lock);
-
 
3973
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
-
 
3974
		CCK_FUSE_HPLL_FREQ_MASK;
-
 
3975
	mutex_unlock(&dev_priv->dpio_lock);
-
 
3976
 
-
 
3977
	return vco_freq[hpll_freq];
-
 
3978
}
-
 
3979
 
-
 
3980
/* Adjust CDclk dividers to allow high res or save power if possible */
-
 
3981
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
-
 
3982
{
-
 
3983
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
3984
	u32 val, cmd;
-
 
3985
 
-
 
3986
	if (cdclk >= 320) /* jump to highest voltage for 400MHz too */
-
 
3987
		cmd = 2;
-
 
3988
	else if (cdclk == 266)
-
 
3989
		cmd = 1;
-
 
3990
	else
-
 
3991
		cmd = 0;
-
 
3992
 
-
 
3993
	mutex_lock(&dev_priv->rps.hw_lock);
-
 
3994
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
-
 
3995
	val &= ~DSPFREQGUAR_MASK;
-
 
3996
	val |= (cmd << DSPFREQGUAR_SHIFT);
-
 
3997
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
-
 
3998
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
-
 
3999
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
-
 
4000
		     50)) {
-
 
4001
		DRM_ERROR("timed out waiting for CDclk change\n");
-
 
4002
	}
-
 
4003
	mutex_unlock(&dev_priv->rps.hw_lock);
-
 
4004
 
-
 
4005
	if (cdclk == 400) {
-
 
4006
		u32 divider, vco;
-
 
4007
 
-
 
4008
		vco = valleyview_get_vco(dev_priv);
-
 
4009
		divider = ((vco << 1) / cdclk) - 1;
-
 
4010
 
-
 
4011
		mutex_lock(&dev_priv->dpio_lock);
-
 
4012
		/* adjust cdclk divider */
-
 
4013
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
-
 
4014
		val &= ~0xf;
-
 
4015
		val |= divider;
-
 
4016
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
-
 
4017
		mutex_unlock(&dev_priv->dpio_lock);
-
 
4018
	}
-
 
4019
 
-
 
4020
	mutex_lock(&dev_priv->dpio_lock);
-
 
4021
	/* adjust self-refresh exit latency value */
-
 
4022
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
-
 
4023
	val &= ~0x7f;
-
 
4024
 
-
 
4025
	/*
-
 
4026
	 * For high bandwidth configs, we set a higher latency in the bunit
-
 
4027
	 * so that the core display fetch happens in time to avoid underruns.
-
 
4028
	 */
-
 
4029
	if (cdclk == 400)
-
 
4030
		val |= 4500 / 250; /* 4.5 usec */
-
 
4031
	else
-
 
4032
		val |= 3000 / 250; /* 3.0 usec */
-
 
4033
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
-
 
4034
	mutex_unlock(&dev_priv->dpio_lock);
-
 
4035
 
-
 
4036
	/* Since we changed the CDclk, we need to update the GMBUSFREQ too */
-
 
4037
	intel_i2c_reset(dev);
-
 
4038
}
-
 
4039
 
-
 
4040
static int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
-
 
4041
{
-
 
4042
	int cur_cdclk, vco;
-
 
4043
	int divider;
-
 
4044
 
-
 
4045
	vco = valleyview_get_vco(dev_priv);
-
 
4046
 
-
 
4047
	mutex_lock(&dev_priv->dpio_lock);
-
 
4048
	divider = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
-
 
4049
	mutex_unlock(&dev_priv->dpio_lock);
-
 
4050
 
-
 
4051
	divider &= 0xf;
-
 
4052
 
-
 
4053
	cur_cdclk = (vco << 1) / (divider + 1);
-
 
4054
 
-
 
4055
	return cur_cdclk;
-
 
4056
}
-
 
4057
 
-
 
4058
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
-
 
4059
				 int max_pixclk)
-
 
4060
{
-
 
4061
	int cur_cdclk;
-
 
4062
 
-
 
4063
	cur_cdclk = valleyview_cur_cdclk(dev_priv);
-
 
4064
 
-
 
4065
	/*
-
 
4066
	 * Really only a few cases to deal with, as only 4 CDclks are supported:
-
 
4067
	 *   200MHz
-
 
4068
	 *   267MHz
-
 
4069
	 *   320MHz
-
 
4070
	 *   400MHz
-
 
4071
	 * So we check to see whether we're above 90% of the lower bin and
-
 
4072
	 * adjust if needed.
-
 
4073
	 */
-
 
4074
	if (max_pixclk > 288000) {
-
 
4075
		return 400;
-
 
4076
	} else if (max_pixclk > 240000) {
-
 
4077
		return 320;
-
 
4078
	} else
-
 
4079
		return 266;
-
 
4080
	/* Looks like the 200MHz CDclk freq doesn't work on some configs */
-
 
4081
}
-
 
4082
 
-
 
4083
static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv,
-
 
4084
				 unsigned modeset_pipes,
-
 
4085
				 struct intel_crtc_config *pipe_config)
-
 
4086
{
-
 
4087
	struct drm_device *dev = dev_priv->dev;
-
 
4088
	struct intel_crtc *intel_crtc;
-
 
4089
	int max_pixclk = 0;
-
 
4090
 
-
 
4091
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
-
 
4092
			    base.head) {
-
 
4093
		if (modeset_pipes & (1 << intel_crtc->pipe))
-
 
4094
			max_pixclk = max(max_pixclk,
-
 
4095
					 pipe_config->adjusted_mode.crtc_clock);
-
 
4096
		else if (intel_crtc->base.enabled)
-
 
4097
			max_pixclk = max(max_pixclk,
-
 
4098
					 intel_crtc->config.adjusted_mode.crtc_clock);
-
 
4099
	}
-
 
4100
 
-
 
4101
	return max_pixclk;
-
 
4102
}
-
 
4103
 
-
 
4104
static void valleyview_modeset_global_pipes(struct drm_device *dev,
-
 
4105
					    unsigned *prepare_pipes,
-
 
4106
					    unsigned modeset_pipes,
-
 
4107
					    struct intel_crtc_config *pipe_config)
-
 
4108
{
-
 
4109
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
4110
	struct intel_crtc *intel_crtc;
-
 
4111
	int max_pixclk = intel_mode_max_pixclk(dev_priv, modeset_pipes,
-
 
4112
					       pipe_config);
-
 
4113
	int cur_cdclk = valleyview_cur_cdclk(dev_priv);
-
 
4114
 
-
 
4115
	if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk)
-
 
4116
		return;
-
 
4117
 
-
 
4118
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
-
 
4119
			    base.head)
-
 
4120
		if (intel_crtc->base.enabled)
-
 
4121
			*prepare_pipes |= (1 << intel_crtc->pipe);
-
 
4122
}
-
 
4123
 
-
 
4124
static void valleyview_modeset_global_resources(struct drm_device *dev)
-
 
4125
{
-
 
4126
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
4127
	int max_pixclk = intel_mode_max_pixclk(dev_priv, 0, NULL);
-
 
4128
	int cur_cdclk = valleyview_cur_cdclk(dev_priv);
-
 
4129
	int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
-
 
4130
 
-
 
4131
	if (req_cdclk != cur_cdclk)
-
 
4132
		valleyview_set_cdclk(dev, req_cdclk);
-
 
4133
}
3707
 
4134
 
3708
static void valleyview_crtc_enable(struct drm_crtc *crtc)
4135
static void valleyview_crtc_enable(struct drm_crtc *crtc)
3709
{
4136
{
3710
	struct drm_device *dev = crtc->dev;
4137
	struct drm_device *dev = crtc->dev;
3711
	struct drm_i915_private *dev_priv = dev->dev_private;
4138
	struct drm_i915_private *dev_priv = dev->dev_private;
3712
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4139
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3713
	struct intel_encoder *encoder;
4140
	struct intel_encoder *encoder;
3714
	int pipe = intel_crtc->pipe;
4141
	int pipe = intel_crtc->pipe;
3715
	int plane = intel_crtc->plane;
4142
	int plane = intel_crtc->plane;
-
 
4143
	bool is_dsi;
3716
 
4144
 
3717
	WARN_ON(!crtc->enabled);
4145
	WARN_ON(!crtc->enabled);
3718
 
4146
 
3719
	if (intel_crtc->active)
4147
	if (intel_crtc->active)
3720
		return;
4148
		return;
3721
 
4149
 
3722
	intel_crtc->active = true;
4150
	intel_crtc->active = true;
3723
	intel_update_watermarks(dev);
-
 
3724
 
4151
 
3725
	for_each_encoder_on_crtc(dev, crtc, encoder)
4152
	for_each_encoder_on_crtc(dev, crtc, encoder)
3726
		if (encoder->pre_pll_enable)
4153
		if (encoder->pre_pll_enable)
3727
			encoder->pre_pll_enable(encoder);
4154
			encoder->pre_pll_enable(encoder);
-
 
4155
 
-
 
4156
	is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
-
 
4157
 
3728
 
4158
	if (!is_dsi)
3729
	vlv_enable_pll(intel_crtc);
4159
	vlv_enable_pll(intel_crtc);
3730
 
4160
 
3731
	for_each_encoder_on_crtc(dev, crtc, encoder)
4161
	for_each_encoder_on_crtc(dev, crtc, encoder)
3732
		if (encoder->pre_enable)
4162
		if (encoder->pre_enable)
3733
			encoder->pre_enable(encoder);
4163
			encoder->pre_enable(encoder);
3734
 
4164
 
3735
	i9xx_pfit_enable(intel_crtc);
4165
	i9xx_pfit_enable(intel_crtc);
3736
 
4166
 
3737
	intel_crtc_load_lut(crtc);
4167
	intel_crtc_load_lut(crtc);
-
 
4168
 
3738
 
4169
	intel_update_watermarks(crtc);
3739
	intel_enable_pipe(dev_priv, pipe, false);
4170
	intel_enable_pipe(dev_priv, pipe, false, is_dsi);
3740
	intel_enable_plane(dev_priv, plane, pipe);
4171
	intel_enable_primary_plane(dev_priv, plane, pipe);
3741
	intel_enable_planes(crtc);
4172
	intel_enable_planes(crtc);
3742
	intel_crtc_update_cursor(crtc, true);
4173
	intel_crtc_update_cursor(crtc, true);
3743
 
4174
 
3744
	intel_update_fbc(dev);
4175
	intel_update_fbc(dev);
3745
 
4176
 
3746
	for_each_encoder_on_crtc(dev, crtc, encoder)
4177
	for_each_encoder_on_crtc(dev, crtc, encoder)
3747
		encoder->enable(encoder);
4178
		encoder->enable(encoder);
3748
}
4179
}
3749
 
4180
 
3750
static void i9xx_crtc_enable(struct drm_crtc *crtc)
4181
static void i9xx_crtc_enable(struct drm_crtc *crtc)
3751
{
4182
{
3752
    struct drm_device *dev = crtc->dev;
4183
    struct drm_device *dev = crtc->dev;
3753
    struct drm_i915_private *dev_priv = dev->dev_private;
4184
    struct drm_i915_private *dev_priv = dev->dev_private;
3754
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4185
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3755
	struct intel_encoder *encoder;
4186
	struct intel_encoder *encoder;
3756
    int pipe = intel_crtc->pipe;
4187
    int pipe = intel_crtc->pipe;
3757
    int plane = intel_crtc->plane;
4188
    int plane = intel_crtc->plane;
3758
 
4189
 
3759
	WARN_ON(!crtc->enabled);
4190
	WARN_ON(!crtc->enabled);
3760
 
4191
 
3761
    if (intel_crtc->active)
4192
    if (intel_crtc->active)
3762
        return;
4193
        return;
3763
 
4194
 
3764
    intel_crtc->active = true;
4195
    intel_crtc->active = true;
3765
    intel_update_watermarks(dev);
-
 
3766
 
4196
 
3767
	for_each_encoder_on_crtc(dev, crtc, encoder)
4197
	for_each_encoder_on_crtc(dev, crtc, encoder)
3768
		if (encoder->pre_enable)
4198
		if (encoder->pre_enable)
3769
			encoder->pre_enable(encoder);
4199
			encoder->pre_enable(encoder);
3770
 
4200
 
3771
	i9xx_enable_pll(intel_crtc);
4201
	i9xx_enable_pll(intel_crtc);
3772
 
4202
 
3773
	i9xx_pfit_enable(intel_crtc);
4203
	i9xx_pfit_enable(intel_crtc);
3774
 
4204
 
3775
	intel_crtc_load_lut(crtc);
4205
	intel_crtc_load_lut(crtc);
-
 
4206
 
3776
 
4207
	intel_update_watermarks(crtc);
3777
    intel_enable_pipe(dev_priv, pipe, false);
4208
	intel_enable_pipe(dev_priv, pipe, false, false);
3778
    intel_enable_plane(dev_priv, plane, pipe);
4209
	intel_enable_primary_plane(dev_priv, plane, pipe);
3779
	intel_enable_planes(crtc);
4210
	intel_enable_planes(crtc);
3780
	/* The fixup needs to happen before cursor is enabled */
4211
	/* The fixup needs to happen before cursor is enabled */
3781
	if (IS_G4X(dev))
4212
	if (IS_G4X(dev))
3782
		g4x_fixup_plane(dev_priv, pipe);
4213
		g4x_fixup_plane(dev_priv, pipe);
3783
	intel_crtc_update_cursor(crtc, true);
4214
	intel_crtc_update_cursor(crtc, true);
3784
 
4215
 
3785
    /* Give the overlay scaler a chance to enable if it's on this pipe */
4216
    /* Give the overlay scaler a chance to enable if it's on this pipe */
3786
    intel_crtc_dpms_overlay(intel_crtc, true);
4217
    intel_crtc_dpms_overlay(intel_crtc, true);
3787
 
4218
 
3788
	intel_update_fbc(dev);
4219
	intel_update_fbc(dev);
3789
 
4220
 
3790
	for_each_encoder_on_crtc(dev, crtc, encoder)
4221
	for_each_encoder_on_crtc(dev, crtc, encoder)
3791
		encoder->enable(encoder);
4222
		encoder->enable(encoder);
3792
}
4223
}
3793
 
4224
 
3794
static void i9xx_pfit_disable(struct intel_crtc *crtc)
4225
static void i9xx_pfit_disable(struct intel_crtc *crtc)
3795
{
4226
{
3796
	struct drm_device *dev = crtc->base.dev;
4227
	struct drm_device *dev = crtc->base.dev;
3797
	struct drm_i915_private *dev_priv = dev->dev_private;
4228
	struct drm_i915_private *dev_priv = dev->dev_private;
3798
 
4229
 
3799
	if (!crtc->config.gmch_pfit.control)
4230
	if (!crtc->config.gmch_pfit.control)
3800
		return;
4231
		return;
3801
 
4232
 
3802
	assert_pipe_disabled(dev_priv, crtc->pipe);
4233
	assert_pipe_disabled(dev_priv, crtc->pipe);
3803
 
4234
 
3804
	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
4235
	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
3805
			 I915_READ(PFIT_CONTROL));
4236
			 I915_READ(PFIT_CONTROL));
3806
		I915_WRITE(PFIT_CONTROL, 0);
4237
		I915_WRITE(PFIT_CONTROL, 0);
3807
}
4238
}
3808
 
4239
 
3809
static void i9xx_crtc_disable(struct drm_crtc *crtc)
4240
static void i9xx_crtc_disable(struct drm_crtc *crtc)
3810
{
4241
{
3811
    struct drm_device *dev = crtc->dev;
4242
    struct drm_device *dev = crtc->dev;
3812
    struct drm_i915_private *dev_priv = dev->dev_private;
4243
    struct drm_i915_private *dev_priv = dev->dev_private;
3813
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4244
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3814
	struct intel_encoder *encoder;
4245
	struct intel_encoder *encoder;
3815
    int pipe = intel_crtc->pipe;
4246
    int pipe = intel_crtc->pipe;
3816
    int plane = intel_crtc->plane;
4247
    int plane = intel_crtc->plane;
3817
 
4248
 
3818
    if (!intel_crtc->active)
4249
    if (!intel_crtc->active)
3819
        return;
4250
        return;
3820
 
4251
 
3821
	for_each_encoder_on_crtc(dev, crtc, encoder)
4252
	for_each_encoder_on_crtc(dev, crtc, encoder)
3822
		encoder->disable(encoder);
4253
		encoder->disable(encoder);
3823
 
4254
 
3824
    /* Give the overlay scaler a chance to disable if it's on this pipe */
4255
    /* Give the overlay scaler a chance to disable if it's on this pipe */
3825
//    intel_crtc_wait_for_pending_flips(crtc);
4256
//    intel_crtc_wait_for_pending_flips(crtc);
3826
//    drm_vblank_off(dev, pipe);
4257
//    drm_vblank_off(dev, pipe);
3827
 
4258
 
3828
	if (dev_priv->fbc.plane == plane)
4259
	if (dev_priv->fbc.plane == plane)
3829
        intel_disable_fbc(dev);
4260
        intel_disable_fbc(dev);
3830
 
4261
 
3831
	intel_crtc_dpms_overlay(intel_crtc, false);
4262
	intel_crtc_dpms_overlay(intel_crtc, false);
3832
	intel_crtc_update_cursor(crtc, false);
4263
	intel_crtc_update_cursor(crtc, false);
3833
	intel_disable_planes(crtc);
4264
	intel_disable_planes(crtc);
3834
    intel_disable_plane(dev_priv, plane, pipe);
4265
	intel_disable_primary_plane(dev_priv, plane, pipe);
3835
 
4266
 
3836
    intel_disable_pipe(dev_priv, pipe);
4267
    intel_disable_pipe(dev_priv, pipe);
3837
 
4268
 
3838
	i9xx_pfit_disable(intel_crtc);
4269
	i9xx_pfit_disable(intel_crtc);
3839
 
4270
 
3840
	for_each_encoder_on_crtc(dev, crtc, encoder)
4271
	for_each_encoder_on_crtc(dev, crtc, encoder)
3841
		if (encoder->post_disable)
4272
		if (encoder->post_disable)
3842
			encoder->post_disable(encoder);
4273
			encoder->post_disable(encoder);
3843
 
4274
 
3844
	if (IS_VALLEYVIEW(dev))
4275
	if (IS_VALLEYVIEW(dev) && !intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
3845
		vlv_disable_pll(dev_priv, pipe);
4276
		vlv_disable_pll(dev_priv, pipe);
3846
	else
4277
	else if (!IS_VALLEYVIEW(dev))
3847
	i9xx_disable_pll(dev_priv, pipe);
4278
	i9xx_disable_pll(dev_priv, pipe);
3848
 
4279
 
3849
    intel_crtc->active = false;
4280
    intel_crtc->active = false;
-
 
4281
	intel_update_watermarks(crtc);
-
 
4282
 
3850
    intel_update_fbc(dev);
4283
    intel_update_fbc(dev);
3851
    intel_update_watermarks(dev);
-
 
3852
}
4284
}
3853
 
4285
 
3854
static void i9xx_crtc_off(struct drm_crtc *crtc)
4286
static void i9xx_crtc_off(struct drm_crtc *crtc)
3855
{
4287
{
3856
}
4288
}
3857
 
4289
 
3858
static void intel_crtc_update_sarea(struct drm_crtc *crtc,
4290
static void intel_crtc_update_sarea(struct drm_crtc *crtc,
3859
				    bool enabled)
4291
				    bool enabled)
3860
{
4292
{
3861
	struct drm_device *dev = crtc->dev;
4293
	struct drm_device *dev = crtc->dev;
3862
	struct drm_i915_master_private *master_priv;
4294
	struct drm_i915_master_private *master_priv;
3863
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4295
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3864
	int pipe = intel_crtc->pipe;
4296
	int pipe = intel_crtc->pipe;
3865
 
4297
 
3866
 
4298
 
3867
#if 0
4299
#if 0
3868
	if (!dev->primary->master)
4300
	if (!dev->primary->master)
3869
		return;
4301
		return;
3870
 
4302
 
3871
	master_priv = dev->primary->master->driver_priv;
4303
	master_priv = dev->primary->master->driver_priv;
3872
	if (!master_priv->sarea_priv)
4304
	if (!master_priv->sarea_priv)
3873
		return;
4305
		return;
3874
 
4306
 
3875
	switch (pipe) {
4307
	switch (pipe) {
3876
	case 0:
4308
	case 0:
3877
		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
4309
		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
3878
		master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
4310
		master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
3879
		break;
4311
		break;
3880
	case 1:
4312
	case 1:
3881
		master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
4313
		master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
3882
		master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
4314
		master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
3883
		break;
4315
		break;
3884
	default:
4316
	default:
3885
		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
4317
		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
3886
		break;
4318
		break;
3887
	}
4319
	}
3888
#endif
4320
#endif
3889
 
4321
 
3890
}
4322
}
3891
 
4323
 
3892
/**
4324
/**
3893
 * Sets the power management mode of the pipe and plane.
4325
 * Sets the power management mode of the pipe and plane.
3894
 */
4326
 */
3895
void intel_crtc_update_dpms(struct drm_crtc *crtc)
4327
void intel_crtc_update_dpms(struct drm_crtc *crtc)
3896
{
4328
{
3897
	struct drm_device *dev = crtc->dev;
4329
	struct drm_device *dev = crtc->dev;
3898
	struct drm_i915_private *dev_priv = dev->dev_private;
4330
	struct drm_i915_private *dev_priv = dev->dev_private;
3899
	struct intel_encoder *intel_encoder;
4331
	struct intel_encoder *intel_encoder;
3900
	bool enable = false;
4332
	bool enable = false;
3901
 
4333
 
3902
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
4334
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
3903
		enable |= intel_encoder->connectors_active;
4335
		enable |= intel_encoder->connectors_active;
3904
 
4336
 
3905
	if (enable)
4337
	if (enable)
3906
		dev_priv->display.crtc_enable(crtc);
4338
		dev_priv->display.crtc_enable(crtc);
3907
	else
4339
	else
3908
		dev_priv->display.crtc_disable(crtc);
4340
		dev_priv->display.crtc_disable(crtc);
3909
 
4341
 
3910
	intel_crtc_update_sarea(crtc, enable);
4342
	intel_crtc_update_sarea(crtc, enable);
3911
}
4343
}
3912
 
4344
 
3913
static void intel_crtc_disable(struct drm_crtc *crtc)
4345
static void intel_crtc_disable(struct drm_crtc *crtc)
3914
{
4346
{
3915
	struct drm_device *dev = crtc->dev;
4347
	struct drm_device *dev = crtc->dev;
3916
	struct drm_connector *connector;
4348
	struct drm_connector *connector;
3917
	struct drm_i915_private *dev_priv = dev->dev_private;
4349
	struct drm_i915_private *dev_priv = dev->dev_private;
3918
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4350
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3919
 
4351
 
3920
	/* crtc should still be enabled when we disable it. */
4352
	/* crtc should still be enabled when we disable it. */
3921
	WARN_ON(!crtc->enabled);
4353
	WARN_ON(!crtc->enabled);
3922
 
4354
 
3923
	dev_priv->display.crtc_disable(crtc);
4355
	dev_priv->display.crtc_disable(crtc);
3924
	intel_crtc->eld_vld = false;
4356
	intel_crtc->eld_vld = false;
3925
	intel_crtc_update_sarea(crtc, false);
4357
	intel_crtc_update_sarea(crtc, false);
3926
	dev_priv->display.off(crtc);
4358
	dev_priv->display.off(crtc);
3927
 
4359
 
3928
	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
4360
	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
-
 
4361
	assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
3929
	assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
4362
	assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
3930
 
4363
 
3931
	if (crtc->fb) {
4364
	if (crtc->fb) {
3932
		mutex_lock(&dev->struct_mutex);
4365
		mutex_lock(&dev->struct_mutex);
3933
		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
4366
		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
3934
		mutex_unlock(&dev->struct_mutex);
4367
		mutex_unlock(&dev->struct_mutex);
3935
		crtc->fb = NULL;
4368
		crtc->fb = NULL;
3936
	}
4369
	}
3937
 
4370
 
3938
	/* Update computed state. */
4371
	/* Update computed state. */
3939
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
4372
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
3940
		if (!connector->encoder || !connector->encoder->crtc)
4373
		if (!connector->encoder || !connector->encoder->crtc)
3941
			continue;
4374
			continue;
3942
 
4375
 
3943
		if (connector->encoder->crtc != crtc)
4376
		if (connector->encoder->crtc != crtc)
3944
			continue;
4377
			continue;
3945
 
4378
 
3946
		connector->dpms = DRM_MODE_DPMS_OFF;
4379
		connector->dpms = DRM_MODE_DPMS_OFF;
3947
		to_intel_encoder(connector->encoder)->connectors_active = false;
4380
		to_intel_encoder(connector->encoder)->connectors_active = false;
3948
	}
4381
	}
3949
}
4382
}
3950
 
4383
 
3951
void intel_encoder_destroy(struct drm_encoder *encoder)
4384
void intel_encoder_destroy(struct drm_encoder *encoder)
3952
{
4385
{
3953
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
4386
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3954
 
4387
 
3955
	drm_encoder_cleanup(encoder);
4388
	drm_encoder_cleanup(encoder);
3956
	kfree(intel_encoder);
4389
	kfree(intel_encoder);
3957
}
4390
}
3958
 
4391
 
3959
/* Simple dpms helper for encoders with just one connector, no cloning and only
4392
/* Simple dpms helper for encoders with just one connector, no cloning and only
3960
 * one kind of off state. It clamps all !ON modes to fully OFF and changes the
4393
 * one kind of off state. It clamps all !ON modes to fully OFF and changes the
3961
 * state of the entire output pipe. */
4394
 * state of the entire output pipe. */
3962
static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
4395
static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
3963
{
4396
{
3964
	if (mode == DRM_MODE_DPMS_ON) {
4397
	if (mode == DRM_MODE_DPMS_ON) {
3965
		encoder->connectors_active = true;
4398
		encoder->connectors_active = true;
3966
 
4399
 
3967
		intel_crtc_update_dpms(encoder->base.crtc);
4400
		intel_crtc_update_dpms(encoder->base.crtc);
3968
	} else {
4401
	} else {
3969
		encoder->connectors_active = false;
4402
		encoder->connectors_active = false;
3970
 
4403
 
3971
		intel_crtc_update_dpms(encoder->base.crtc);
4404
		intel_crtc_update_dpms(encoder->base.crtc);
3972
	}
4405
	}
3973
}
4406
}
3974
 
4407
 
3975
/* Cross check the actual hw state with our own modeset state tracking (and it's
4408
/* Cross check the actual hw state with our own modeset state tracking (and it's
3976
 * internal consistency). */
4409
 * internal consistency). */
3977
static void intel_connector_check_state(struct intel_connector *connector)
4410
static void intel_connector_check_state(struct intel_connector *connector)
3978
{
4411
{
3979
	if (connector->get_hw_state(connector)) {
4412
	if (connector->get_hw_state(connector)) {
3980
		struct intel_encoder *encoder = connector->encoder;
4413
		struct intel_encoder *encoder = connector->encoder;
3981
		struct drm_crtc *crtc;
4414
		struct drm_crtc *crtc;
3982
		bool encoder_enabled;
4415
		bool encoder_enabled;
3983
		enum pipe pipe;
4416
		enum pipe pipe;
3984
 
4417
 
3985
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4418
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
3986
			      connector->base.base.id,
4419
			      connector->base.base.id,
3987
			      drm_get_connector_name(&connector->base));
4420
			      drm_get_connector_name(&connector->base));
3988
 
4421
 
3989
		WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
4422
		WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
3990
		     "wrong connector dpms state\n");
4423
		     "wrong connector dpms state\n");
3991
		WARN(connector->base.encoder != &encoder->base,
4424
		WARN(connector->base.encoder != &encoder->base,
3992
		     "active connector not linked to encoder\n");
4425
		     "active connector not linked to encoder\n");
3993
		WARN(!encoder->connectors_active,
4426
		WARN(!encoder->connectors_active,
3994
		     "encoder->connectors_active not set\n");
4427
		     "encoder->connectors_active not set\n");
3995
 
4428
 
3996
		encoder_enabled = encoder->get_hw_state(encoder, &pipe);
4429
		encoder_enabled = encoder->get_hw_state(encoder, &pipe);
3997
		WARN(!encoder_enabled, "encoder not enabled\n");
4430
		WARN(!encoder_enabled, "encoder not enabled\n");
3998
		if (WARN_ON(!encoder->base.crtc))
4431
		if (WARN_ON(!encoder->base.crtc))
3999
			return;
4432
			return;
4000
 
4433
 
4001
		crtc = encoder->base.crtc;
4434
		crtc = encoder->base.crtc;
4002
 
4435
 
4003
		WARN(!crtc->enabled, "crtc not enabled\n");
4436
		WARN(!crtc->enabled, "crtc not enabled\n");
4004
		WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
4437
		WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
4005
		WARN(pipe != to_intel_crtc(crtc)->pipe,
4438
		WARN(pipe != to_intel_crtc(crtc)->pipe,
4006
		     "encoder active on the wrong pipe\n");
4439
		     "encoder active on the wrong pipe\n");
4007
	}
4440
	}
4008
}
4441
}
4009
 
4442
 
4010
/* Even simpler default implementation, if there's really no special case to
4443
/* Even simpler default implementation, if there's really no special case to
4011
 * consider. */
4444
 * consider. */
4012
void intel_connector_dpms(struct drm_connector *connector, int mode)
4445
void intel_connector_dpms(struct drm_connector *connector, int mode)
4013
{
4446
{
4014
	/* All the simple cases only support two dpms states. */
4447
	/* All the simple cases only support two dpms states. */
4015
	if (mode != DRM_MODE_DPMS_ON)
4448
	if (mode != DRM_MODE_DPMS_ON)
4016
		mode = DRM_MODE_DPMS_OFF;
4449
		mode = DRM_MODE_DPMS_OFF;
4017
 
4450
 
4018
	if (mode == connector->dpms)
4451
	if (mode == connector->dpms)
4019
		return;
4452
		return;
4020
 
4453
 
4021
	connector->dpms = mode;
4454
	connector->dpms = mode;
4022
 
4455
 
4023
	/* Only need to change hw state when actually enabled */
4456
	/* Only need to change hw state when actually enabled */
4024
	if (connector->encoder)
4457
	if (connector->encoder)
4025
		intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
4458
		intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
4026
 
4459
 
4027
	intel_modeset_check_state(connector->dev);
4460
	intel_modeset_check_state(connector->dev);
4028
}
4461
}
4029
 
4462
 
4030
/* Simple connector->get_hw_state implementation for encoders that support only
4463
/* Simple connector->get_hw_state implementation for encoders that support only
4031
 * one connector and no cloning and hence the encoder state determines the state
4464
 * one connector and no cloning and hence the encoder state determines the state
4032
 * of the connector. */
4465
 * of the connector. */
4033
bool intel_connector_get_hw_state(struct intel_connector *connector)
4466
bool intel_connector_get_hw_state(struct intel_connector *connector)
4034
{
4467
{
4035
	enum pipe pipe = 0;
4468
	enum pipe pipe = 0;
4036
	struct intel_encoder *encoder = connector->encoder;
4469
	struct intel_encoder *encoder = connector->encoder;
4037
 
4470
 
4038
	return encoder->get_hw_state(encoder, &pipe);
4471
	return encoder->get_hw_state(encoder, &pipe);
4039
}
4472
}
4040
 
4473
 
4041
static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
4474
static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
4042
				     struct intel_crtc_config *pipe_config)
4475
				     struct intel_crtc_config *pipe_config)
4043
{
4476
{
4044
	struct drm_i915_private *dev_priv = dev->dev_private;
4477
	struct drm_i915_private *dev_priv = dev->dev_private;
4045
	struct intel_crtc *pipe_B_crtc =
4478
	struct intel_crtc *pipe_B_crtc =
4046
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
4479
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
4047
 
4480
 
4048
	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
4481
	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
4049
		      pipe_name(pipe), pipe_config->fdi_lanes);
4482
		      pipe_name(pipe), pipe_config->fdi_lanes);
4050
	if (pipe_config->fdi_lanes > 4) {
4483
	if (pipe_config->fdi_lanes > 4) {
4051
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
4484
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
4052
			      pipe_name(pipe), pipe_config->fdi_lanes);
4485
			      pipe_name(pipe), pipe_config->fdi_lanes);
4053
		return false;
4486
		return false;
4054
	}
4487
	}
4055
 
4488
 
4056
	if (IS_HASWELL(dev)) {
4489
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
4057
		if (pipe_config->fdi_lanes > 2) {
4490
		if (pipe_config->fdi_lanes > 2) {
4058
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
4491
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
4059
				      pipe_config->fdi_lanes);
4492
				      pipe_config->fdi_lanes);
4060
			return false;
4493
			return false;
4061
		} else {
4494
		} else {
4062
			return true;
4495
			return true;
4063
		}
4496
		}
4064
	}
4497
	}
4065
 
4498
 
4066
	if (INTEL_INFO(dev)->num_pipes == 2)
4499
	if (INTEL_INFO(dev)->num_pipes == 2)
4067
		return true;
4500
		return true;
4068
 
4501
 
4069
	/* Ivybridge 3 pipe is really complicated */
4502
	/* Ivybridge 3 pipe is really complicated */
4070
	switch (pipe) {
4503
	switch (pipe) {
4071
	case PIPE_A:
4504
	case PIPE_A:
4072
		return true;
4505
		return true;
4073
	case PIPE_B:
4506
	case PIPE_B:
4074
		if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
4507
		if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
4075
		    pipe_config->fdi_lanes > 2) {
4508
		    pipe_config->fdi_lanes > 2) {
4076
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
4509
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
4077
				      pipe_name(pipe), pipe_config->fdi_lanes);
4510
				      pipe_name(pipe), pipe_config->fdi_lanes);
4078
			return false;
4511
			return false;
4079
		}
4512
		}
4080
		return true;
4513
		return true;
4081
	case PIPE_C:
4514
	case PIPE_C:
4082
		if (!pipe_has_enabled_pch(pipe_B_crtc) ||
4515
		if (!pipe_has_enabled_pch(pipe_B_crtc) ||
4083
		    pipe_B_crtc->config.fdi_lanes <= 2) {
4516
		    pipe_B_crtc->config.fdi_lanes <= 2) {
4084
			if (pipe_config->fdi_lanes > 2) {
4517
			if (pipe_config->fdi_lanes > 2) {
4085
				DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
4518
				DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
4086
					      pipe_name(pipe), pipe_config->fdi_lanes);
4519
					      pipe_name(pipe), pipe_config->fdi_lanes);
4087
				return false;
4520
				return false;
4088
			}
4521
			}
4089
		} else {
4522
		} else {
4090
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
4523
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
4091
			return false;
4524
			return false;
4092
		}
4525
		}
4093
		return true;
4526
		return true;
4094
	default:
4527
	default:
4095
		BUG();
4528
		BUG();
4096
	}
4529
	}
4097
}
4530
}
4098
 
4531
 
4099
#define RETRY 1
4532
#define RETRY 1
4100
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
4533
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
4101
				      struct intel_crtc_config *pipe_config)
4534
				      struct intel_crtc_config *pipe_config)
4102
{
4535
{
4103
	struct drm_device *dev = intel_crtc->base.dev;
4536
	struct drm_device *dev = intel_crtc->base.dev;
4104
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
4537
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
4105
	int lane, link_bw, fdi_dotclock;
4538
	int lane, link_bw, fdi_dotclock;
4106
	bool setup_ok, needs_recompute = false;
4539
	bool setup_ok, needs_recompute = false;
4107
 
4540
 
4108
retry:
4541
retry:
4109
	/* FDI is a binary signal running at ~2.7GHz, encoding
4542
	/* FDI is a binary signal running at ~2.7GHz, encoding
4110
	 * each output octet as 10 bits. The actual frequency
4543
	 * each output octet as 10 bits. The actual frequency
4111
	 * is stored as a divider into a 100MHz clock, and the
4544
	 * is stored as a divider into a 100MHz clock, and the
4112
	 * mode pixel clock is stored in units of 1KHz.
4545
	 * mode pixel clock is stored in units of 1KHz.
4113
	 * Hence the bw of each lane in terms of the mode signal
4546
	 * Hence the bw of each lane in terms of the mode signal
4114
	 * is:
4547
	 * is:
4115
	 */
4548
	 */
4116
	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
4549
	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
4117
 
4550
 
4118
	fdi_dotclock = adjusted_mode->clock;
-
 
4119
	fdi_dotclock /= pipe_config->pixel_multiplier;
4551
	fdi_dotclock = adjusted_mode->crtc_clock;
4120
 
4552
 
4121
	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
4553
	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
4122
					   pipe_config->pipe_bpp);
4554
					   pipe_config->pipe_bpp);
4123
 
4555
 
4124
	pipe_config->fdi_lanes = lane;
4556
	pipe_config->fdi_lanes = lane;
4125
 
4557
 
4126
	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
4558
	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
4127
			       link_bw, &pipe_config->fdi_m_n);
4559
			       link_bw, &pipe_config->fdi_m_n);
4128
 
4560
 
4129
	setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
4561
	setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
4130
					    intel_crtc->pipe, pipe_config);
4562
					    intel_crtc->pipe, pipe_config);
4131
	if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
4563
	if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
4132
		pipe_config->pipe_bpp -= 2*3;
4564
		pipe_config->pipe_bpp -= 2*3;
4133
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
4565
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
4134
			      pipe_config->pipe_bpp);
4566
			      pipe_config->pipe_bpp);
4135
		needs_recompute = true;
4567
		needs_recompute = true;
4136
		pipe_config->bw_constrained = true;
4568
		pipe_config->bw_constrained = true;
4137
 
4569
 
4138
		goto retry;
4570
		goto retry;
4139
	}
4571
	}
4140
 
4572
 
4141
	if (needs_recompute)
4573
	if (needs_recompute)
4142
		return RETRY;
4574
		return RETRY;
4143
 
4575
 
4144
	return setup_ok ? 0 : -EINVAL;
4576
	return setup_ok ? 0 : -EINVAL;
4145
}
4577
}
4146
 
4578
 
4147
static void hsw_compute_ips_config(struct intel_crtc *crtc,
4579
static void hsw_compute_ips_config(struct intel_crtc *crtc,
4148
				   struct intel_crtc_config *pipe_config)
4580
				   struct intel_crtc_config *pipe_config)
4149
{
4581
{
4150
	pipe_config->ips_enabled = i915_enable_ips &&
4582
	pipe_config->ips_enabled = i915_enable_ips &&
4151
				   hsw_crtc_supports_ips(crtc) &&
4583
				   hsw_crtc_supports_ips(crtc) &&
4152
				   pipe_config->pipe_bpp <= 24;
4584
				   pipe_config->pipe_bpp <= 24;
4153
}
4585
}
4154
 
4586
 
4155
static int intel_crtc_compute_config(struct intel_crtc *crtc,
4587
static int intel_crtc_compute_config(struct intel_crtc *crtc,
4156
				     struct intel_crtc_config *pipe_config)
4588
				     struct intel_crtc_config *pipe_config)
4157
{
4589
{
4158
	struct drm_device *dev = crtc->base.dev;
4590
	struct drm_device *dev = crtc->base.dev;
4159
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
4591
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
-
 
4592
 
4160
 
4593
	/* FIXME should check pixel clock limits on all platforms */
-
 
4594
	if (INTEL_INFO(dev)->gen < 4) {
-
 
4595
		struct drm_i915_private *dev_priv = dev->dev_private;
-
 
4596
		int clock_limit =
-
 
4597
			dev_priv->display.get_display_clock_speed(dev);
-
 
4598
 
-
 
4599
		/*
-
 
4600
		 * Enable pixel doubling when the dot clock
-
 
4601
		 * is > 90% of the (display) core speed.
4161
	if (HAS_PCH_SPLIT(dev)) {
4602
		 *
-
 
4603
		 * GDG double wide on either pipe,
-
 
4604
		 * otherwise pipe A only.
-
 
4605
		 */
-
 
4606
		if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
-
 
4607
		    adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
4162
		/* FDI link clock is fixed at 2.7G */
4608
			clock_limit *= 2;
-
 
4609
			pipe_config->double_wide = true;
-
 
4610
		}
4163
		if (pipe_config->requested_mode.clock * 3
4611
 
4164
		    > IRONLAKE_FDI_FREQ * 4)
4612
		if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
4165
			return -EINVAL;
4613
			return -EINVAL;
4166
	}
4614
	}
-
 
4615
 
-
 
4616
	/*
-
 
4617
	 * Pipe horizontal size must be even in:
-
 
4618
	 * - DVO ganged mode
-
 
4619
	 * - LVDS dual channel mode
-
 
4620
	 * - Double wide pipe
-
 
4621
	 */
-
 
4622
	if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
-
 
4623
	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
-
 
4624
		pipe_config->pipe_src_w &= ~1;
4167
 
4625
 
4168
	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
4626
	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
4169
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
4627
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
4170
	 */
4628
	 */
4171
	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
4629
	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
4172
		adjusted_mode->hsync_start == adjusted_mode->hdisplay)
4630
		adjusted_mode->hsync_start == adjusted_mode->hdisplay)
4173
		return -EINVAL;
4631
		return -EINVAL;
4174
 
4632
 
4175
	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
4633
	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
4176
		pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
4634
		pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
4177
	} else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
4635
	} else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
4178
		/* only a 8bpc pipe, with 6bpc dither through the panel fitter
4636
		/* only a 8bpc pipe, with 6bpc dither through the panel fitter
4179
		 * for lvds. */
4637
		 * for lvds. */
4180
		pipe_config->pipe_bpp = 8*3;
4638
		pipe_config->pipe_bpp = 8*3;
4181
	}
4639
	}
4182
 
4640
 
4183
	if (HAS_IPS(dev))
4641
	if (HAS_IPS(dev))
4184
		hsw_compute_ips_config(crtc, pipe_config);
4642
		hsw_compute_ips_config(crtc, pipe_config);
4185
 
4643
 
4186
	/* XXX: PCH clock sharing is done in ->mode_set, so make sure the old
4644
	/* XXX: PCH clock sharing is done in ->mode_set, so make sure the old
4187
	 * clock survives for now. */
4645
	 * clock survives for now. */
4188
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
4646
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
4189
		pipe_config->shared_dpll = crtc->config.shared_dpll;
4647
		pipe_config->shared_dpll = crtc->config.shared_dpll;
4190
 
4648
 
4191
	if (pipe_config->has_pch_encoder)
4649
	if (pipe_config->has_pch_encoder)
4192
		return ironlake_fdi_compute_config(crtc, pipe_config);
4650
		return ironlake_fdi_compute_config(crtc, pipe_config);
4193
 
4651
 
4194
	return 0;
4652
	return 0;
4195
}
4653
}
4196
 
4654
 
4197
/* Valleyview display core clock — hard-coded until proper readout exists. */
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
	return 400000; /* FIXME */
}
4201
 
4659
 
4202
/* i945: fixed 400 MHz display core clock (returned in kHz). */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}
4206
 
4664
 
4207
/* i915: fixed 333 MHz display core clock (returned in kHz). */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333000;
}
4211
 
4669
 
4212
/* Fallback for remaining i9xx variants: 200 MHz core clock (in kHz). */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}
4216
 
4674
 
4217
static int pnv_get_display_clock_speed(struct drm_device *dev)
4675
static int pnv_get_display_clock_speed(struct drm_device *dev)
4218
{
4676
{
4219
	u16 gcfgc = 0;
4677
	u16 gcfgc = 0;
4220
 
4678
 
4221
	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
4679
	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
4222
 
4680
 
4223
	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
4681
	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
4224
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
4682
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
4225
		return 267000;
4683
		return 267000;
4226
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
4684
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
4227
		return 333000;
4685
		return 333000;
4228
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
4686
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
4229
		return 444000;
4687
		return 444000;
4230
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
4688
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
4231
		return 200000;
4689
		return 200000;
4232
	default:
4690
	default:
4233
		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
4691
		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
4234
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
4692
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
4235
		return 133000;
4693
		return 133000;
4236
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
4694
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
4237
		return 167000;
4695
		return 167000;
4238
	}
4696
	}
4239
}
4697
}
4240
 
4698
 
4241
static int i915gm_get_display_clock_speed(struct drm_device *dev)
4699
static int i915gm_get_display_clock_speed(struct drm_device *dev)
4242
{
4700
{
4243
	u16 gcfgc = 0;
4701
	u16 gcfgc = 0;
4244
 
4702
 
4245
	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
4703
	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
4246
 
4704
 
4247
	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
4705
	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
4248
		return 133000;
4706
		return 133000;
4249
	else {
4707
	else {
4250
		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
4708
		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
4251
		case GC_DISPLAY_CLOCK_333_MHZ:
4709
		case GC_DISPLAY_CLOCK_333_MHZ:
4252
			return 333000;
4710
			return 333000;
4253
		default:
4711
		default:
4254
		case GC_DISPLAY_CLOCK_190_200_MHZ:
4712
		case GC_DISPLAY_CLOCK_190_200_MHZ:
4255
			return 190000;
4713
			return 190000;
4256
		}
4714
		}
4257
	}
4715
	}
4258
}
4716
}
4259
 
4717
 
4260
/* i865: fixed 266 MHz display core clock (returned in kHz). */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266000;
}
4264
 
4722
 
4265
static int i855_get_display_clock_speed(struct drm_device *dev)
4723
static int i855_get_display_clock_speed(struct drm_device *dev)
4266
{
4724
{
4267
	u16 hpllcc = 0;
4725
	u16 hpllcc = 0;
4268
	/* Assume that the hardware is in the high speed state.  This
4726
	/* Assume that the hardware is in the high speed state.  This
4269
	 * should be the default.
4727
	 * should be the default.
4270
	 */
4728
	 */
4271
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
4729
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
4272
	case GC_CLOCK_133_200:
4730
	case GC_CLOCK_133_200:
4273
	case GC_CLOCK_100_200:
4731
	case GC_CLOCK_100_200:
4274
		return 200000;
4732
		return 200000;
4275
	case GC_CLOCK_166_250:
4733
	case GC_CLOCK_166_250:
4276
		return 250000;
4734
		return 250000;
4277
	case GC_CLOCK_100_133:
4735
	case GC_CLOCK_100_133:
4278
		return 133000;
4736
		return 133000;
4279
	}
4737
	}
4280
 
4738
 
4281
	/* Shouldn't happen */
4739
	/* Shouldn't happen */
4282
	return 0;
4740
	return 0;
4283
}
4741
}
4284
 
4742
 
4285
/* i830: fixed 133 MHz display core clock (returned in kHz). */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133000;
}
4289
 
4747
 
4290
static void
4748
static void
4291
intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
4749
intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
4292
{
4750
{
4293
	while (*num > DATA_LINK_M_N_MASK ||
4751
	while (*num > DATA_LINK_M_N_MASK ||
4294
	       *den > DATA_LINK_M_N_MASK) {
4752
	       *den > DATA_LINK_M_N_MASK) {
4295
		*num >>= 1;
4753
		*num >>= 1;
4296
		*den >>= 1;
4754
		*den >>= 1;
4297
	}
4755
	}
4298
}
4756
}
4299
 
4757
 
4300
/*
 * Compute an M/N register pair approximating the ratio m/n: choose N as
 * the rounded-up power of two (capped at DATA_LINK_N_MAX), scale M to
 * match, then shrink both values into register range.
 */
static void compute_m_n(unsigned int m, unsigned int n,
			uint32_t *ret_m, uint32_t *ret_n)
{
	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
	*ret_m = div_u64((uint64_t) m * *ret_n, n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}
4307
 
4765
 
4308
void
4766
void
4309
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
4767
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
4310
		       int pixel_clock, int link_clock,
4768
		       int pixel_clock, int link_clock,
4311
		       struct intel_link_m_n *m_n)
4769
		       struct intel_link_m_n *m_n)
4312
{
4770
{
4313
	m_n->tu = 64;
4771
	m_n->tu = 64;
4314
 
4772
 
4315
	compute_m_n(bits_per_pixel * pixel_clock,
4773
	compute_m_n(bits_per_pixel * pixel_clock,
4316
		    link_clock * nlanes * 8,
4774
		    link_clock * nlanes * 8,
4317
		    &m_n->gmch_m, &m_n->gmch_n);
4775
		    &m_n->gmch_m, &m_n->gmch_n);
4318
 
4776
 
4319
	compute_m_n(pixel_clock, link_clock,
4777
	compute_m_n(pixel_clock, link_clock,
4320
		    &m_n->link_m, &m_n->link_n);
4778
		    &m_n->link_m, &m_n->link_n);
4321
}
4779
}
4322
 
4780
 
4323
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4781
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4324
{
4782
{
4325
	if (i915_panel_use_ssc >= 0)
4783
	if (i915_panel_use_ssc >= 0)
4326
		return i915_panel_use_ssc != 0;
4784
		return i915_panel_use_ssc != 0;
4327
	return dev_priv->vbt.lvds_use_ssc
4785
	return dev_priv->vbt.lvds_use_ssc
4328
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4786
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4329
}
4787
}
4330
 
-
 
4331
/*
 * vlv_get_refclk - DPLL reference clock for Valleyview, in kHz.
 *
 * Only the 100 MHz reference has been validated so far, so it is used
 * for every output type.  The former per-output selection (96 MHz for
 * analog/non-SSC LVDS, 100 MHz for SSC LVDS/eDP) sat after an
 * unconditional return and was therefore unreachable dead code; it has
 * been removed with no change in behavior.
 */
static int vlv_get_refclk(struct drm_crtc *crtc)
{
	return 100000; /* only one validated so far */
}
-
 
4352
 
4788
 
4353
static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
4789
static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
4354
{
4790
{
4355
	struct drm_device *dev = crtc->dev;
4791
	struct drm_device *dev = crtc->dev;
4356
	struct drm_i915_private *dev_priv = dev->dev_private;
4792
	struct drm_i915_private *dev_priv = dev->dev_private;
4357
	int refclk;
4793
	int refclk;
4358
 
4794
 
4359
	if (IS_VALLEYVIEW(dev)) {
4795
	if (IS_VALLEYVIEW(dev)) {
4360
		refclk = vlv_get_refclk(crtc);
4796
		refclk = 100000;
4361
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4797
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4362
	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4798
	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4363
		refclk = dev_priv->vbt.lvds_ssc_freq * 1000;
4799
		refclk = dev_priv->vbt.lvds_ssc_freq;
4364
		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
4800
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
4365
			      refclk / 1000);
-
 
4366
	} else if (!IS_GEN2(dev)) {
4801
	} else if (!IS_GEN2(dev)) {
4367
		refclk = 96000;
4802
		refclk = 96000;
4368
	} else {
4803
	} else {
4369
		refclk = 48000;
4804
		refclk = 48000;
4370
	}
4805
	}
4371
 
4806
 
4372
	return refclk;
4807
	return refclk;
4373
}
4808
}
4374
 
4809
 
4375
static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
4810
static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
4376
{
4811
{
4377
	return (1 << dpll->n) << 16 | dpll->m2;
4812
	return (1 << dpll->n) << 16 | dpll->m2;
4378
}
4813
}
4379
 
4814
 
4380
static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
4815
static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
4381
{
4816
{
4382
	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
4817
	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
4383
}
4818
}
4384
 
4819
 
4385
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
4820
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
4386
				     intel_clock_t *reduced_clock)
4821
				     intel_clock_t *reduced_clock)
4387
{
4822
{
4388
	struct drm_device *dev = crtc->base.dev;
4823
	struct drm_device *dev = crtc->base.dev;
4389
	struct drm_i915_private *dev_priv = dev->dev_private;
4824
	struct drm_i915_private *dev_priv = dev->dev_private;
4390
	int pipe = crtc->pipe;
4825
	int pipe = crtc->pipe;
4391
	u32 fp, fp2 = 0;
4826
	u32 fp, fp2 = 0;
4392
 
4827
 
4393
	if (IS_PINEVIEW(dev)) {
4828
	if (IS_PINEVIEW(dev)) {
4394
		fp = pnv_dpll_compute_fp(&crtc->config.dpll);
4829
		fp = pnv_dpll_compute_fp(&crtc->config.dpll);
4395
		if (reduced_clock)
4830
		if (reduced_clock)
4396
			fp2 = pnv_dpll_compute_fp(reduced_clock);
4831
			fp2 = pnv_dpll_compute_fp(reduced_clock);
4397
	} else {
4832
	} else {
4398
		fp = i9xx_dpll_compute_fp(&crtc->config.dpll);
4833
		fp = i9xx_dpll_compute_fp(&crtc->config.dpll);
4399
		if (reduced_clock)
4834
		if (reduced_clock)
4400
			fp2 = i9xx_dpll_compute_fp(reduced_clock);
4835
			fp2 = i9xx_dpll_compute_fp(reduced_clock);
4401
	}
4836
	}
4402
 
4837
 
4403
	I915_WRITE(FP0(pipe), fp);
4838
	I915_WRITE(FP0(pipe), fp);
4404
	crtc->config.dpll_hw_state.fp0 = fp;
4839
	crtc->config.dpll_hw_state.fp0 = fp;
4405
 
4840
 
4406
	crtc->lowfreq_avail = false;
4841
	crtc->lowfreq_avail = false;
4407
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
4842
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
4408
	    reduced_clock && i915_powersave) {
4843
	    reduced_clock && i915_powersave) {
4409
		I915_WRITE(FP1(pipe), fp2);
4844
		I915_WRITE(FP1(pipe), fp2);
4410
		crtc->config.dpll_hw_state.fp1 = fp2;
4845
		crtc->config.dpll_hw_state.fp1 = fp2;
4411
		crtc->lowfreq_avail = true;
4846
		crtc->lowfreq_avail = true;
4412
	} else {
4847
	} else {
4413
		I915_WRITE(FP1(pipe), fp);
4848
		I915_WRITE(FP1(pipe), fp);
4414
		crtc->config.dpll_hw_state.fp1 = fp;
4849
		crtc->config.dpll_hw_state.fp1 = fp;
4415
	}
4850
	}
4416
}
4851
}
4417
 
4852
 
-
 
4853
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
4418
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv)
4854
		pipe)
4419
{
4855
{
4420
	u32 reg_val;
4856
	u32 reg_val;
4421
 
4857
 
4422
	/*
4858
	/*
4423
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
4859
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
4424
	 * and set it to a reasonable value instead.
4860
	 * and set it to a reasonable value instead.
4425
	 */
4861
	 */
4426
	reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1));
4862
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
4427
	reg_val &= 0xffffff00;
4863
	reg_val &= 0xffffff00;
4428
	reg_val |= 0x00000030;
4864
	reg_val |= 0x00000030;
4429
	vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val);
4865
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
4430
 
4866
 
4431
	reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION);
4867
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
4432
	reg_val &= 0x8cffffff;
4868
	reg_val &= 0x8cffffff;
4433
	reg_val = 0x8c000000;
4869
	reg_val = 0x8c000000;
4434
	vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val);
4870
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
4435
 
4871
 
4436
	reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1));
4872
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
4437
	reg_val &= 0xffffff00;
4873
	reg_val &= 0xffffff00;
4438
	vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val);
4874
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
4439
 
4875
 
4440
	reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION);
4876
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
4441
	reg_val &= 0x00ffffff;
4877
	reg_val &= 0x00ffffff;
4442
	reg_val |= 0xb0000000;
4878
	reg_val |= 0xb0000000;
4443
	vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val);
4879
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
4444
}
4880
}
4445
 
4881
 
4446
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
4882
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
4447
					 struct intel_link_m_n *m_n)
4883
					 struct intel_link_m_n *m_n)
4448
{
4884
{
4449
	struct drm_device *dev = crtc->base.dev;
4885
	struct drm_device *dev = crtc->base.dev;
4450
	struct drm_i915_private *dev_priv = dev->dev_private;
4886
	struct drm_i915_private *dev_priv = dev->dev_private;
4451
	int pipe = crtc->pipe;
4887
	int pipe = crtc->pipe;
4452
 
4888
 
4453
	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
4889
	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
4454
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
4890
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
4455
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
4891
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
4456
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
4892
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
4457
}
4893
}
4458
 
4894
 
4459
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
4895
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
4460
					 struct intel_link_m_n *m_n)
4896
					 struct intel_link_m_n *m_n)
4461
{
4897
{
4462
	struct drm_device *dev = crtc->base.dev;
4898
	struct drm_device *dev = crtc->base.dev;
4463
	struct drm_i915_private *dev_priv = dev->dev_private;
4899
	struct drm_i915_private *dev_priv = dev->dev_private;
4464
	int pipe = crtc->pipe;
4900
	int pipe = crtc->pipe;
4465
	enum transcoder transcoder = crtc->config.cpu_transcoder;
4901
	enum transcoder transcoder = crtc->config.cpu_transcoder;
4466
 
4902
 
4467
	if (INTEL_INFO(dev)->gen >= 5) {
4903
	if (INTEL_INFO(dev)->gen >= 5) {
4468
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
4904
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
4469
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
4905
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
4470
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
4906
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
4471
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
4907
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
4472
	} else {
4908
	} else {
4473
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
4909
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
4474
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
4910
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
4475
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
4911
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
4476
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
4912
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
4477
	}
4913
	}
4478
}
4914
}
4479
 
4915
 
4480
static void intel_dp_set_m_n(struct intel_crtc *crtc)
4916
static void intel_dp_set_m_n(struct intel_crtc *crtc)
4481
{
4917
{
4482
	if (crtc->config.has_pch_encoder)
4918
	if (crtc->config.has_pch_encoder)
4483
		intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
4919
		intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
4484
	else
4920
	else
4485
		intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
4921
		intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
4486
}
4922
}
4487
 
4923
 
4488
static void vlv_update_pll(struct intel_crtc *crtc)
4924
static void vlv_update_pll(struct intel_crtc *crtc)
4489
{
4925
{
4490
	struct drm_device *dev = crtc->base.dev;
4926
	struct drm_device *dev = crtc->base.dev;
4491
	struct drm_i915_private *dev_priv = dev->dev_private;
4927
	struct drm_i915_private *dev_priv = dev->dev_private;
4492
	int pipe = crtc->pipe;
4928
	int pipe = crtc->pipe;
4493
	u32 dpll, mdiv;
4929
	u32 dpll, mdiv;
4494
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
4930
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
4495
	u32 coreclk, reg_val, dpll_md;
4931
	u32 coreclk, reg_val, dpll_md;
4496
 
4932
 
4497
	mutex_lock(&dev_priv->dpio_lock);
4933
	mutex_lock(&dev_priv->dpio_lock);
4498
 
4934
 
4499
	bestn = crtc->config.dpll.n;
4935
	bestn = crtc->config.dpll.n;
4500
	bestm1 = crtc->config.dpll.m1;
4936
	bestm1 = crtc->config.dpll.m1;
4501
	bestm2 = crtc->config.dpll.m2;
4937
	bestm2 = crtc->config.dpll.m2;
4502
	bestp1 = crtc->config.dpll.p1;
4938
	bestp1 = crtc->config.dpll.p1;
4503
	bestp2 = crtc->config.dpll.p2;
4939
	bestp2 = crtc->config.dpll.p2;
4504
 
4940
 
4505
	/* See eDP HDMI DPIO driver vbios notes doc */
4941
	/* See eDP HDMI DPIO driver vbios notes doc */
4506
 
4942
 
4507
	/* PLL B needs special handling */
4943
	/* PLL B needs special handling */
4508
	if (pipe)
4944
	if (pipe)
4509
		vlv_pllb_recal_opamp(dev_priv);
4945
		vlv_pllb_recal_opamp(dev_priv, pipe);
4510
 
4946
 
4511
	/* Set up Tx target for periodic Rcomp update */
4947
	/* Set up Tx target for periodic Rcomp update */
4512
	vlv_dpio_write(dev_priv, DPIO_IREF_BCAST, 0x0100000f);
4948
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
4513
 
4949
 
4514
	/* Disable target IRef on PLL */
4950
	/* Disable target IRef on PLL */
4515
	reg_val = vlv_dpio_read(dev_priv, DPIO_IREF_CTL(pipe));
4951
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
4516
	reg_val &= 0x00ffffff;
4952
	reg_val &= 0x00ffffff;
4517
	vlv_dpio_write(dev_priv, DPIO_IREF_CTL(pipe), reg_val);
4953
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
4518
 
4954
 
4519
	/* Disable fast lock */
4955
	/* Disable fast lock */
4520
	vlv_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x610);
4956
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
4521
 
4957
 
4522
	/* Set idtafcrecal before PLL is enabled */
4958
	/* Set idtafcrecal before PLL is enabled */
4523
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
4959
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
4524
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
4960
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
4525
	mdiv |= ((bestn << DPIO_N_SHIFT));
4961
	mdiv |= ((bestn << DPIO_N_SHIFT));
4526
	mdiv |= (1 << DPIO_K_SHIFT);
4962
	mdiv |= (1 << DPIO_K_SHIFT);
4527
 
4963
 
4528
	/*
4964
	/*
4529
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
4965
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
4530
	 * but we don't support that).
4966
	 * but we don't support that).
4531
	 * Note: don't use the DAC post divider as it seems unstable.
4967
	 * Note: don't use the DAC post divider as it seems unstable.
4532
	 */
4968
	 */
4533
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
4969
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
4534
	vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
4970
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
4535
 
4971
 
4536
	mdiv |= DPIO_ENABLE_CALIBRATION;
4972
	mdiv |= DPIO_ENABLE_CALIBRATION;
4537
	vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
4973
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
4538
 
4974
 
4539
	/* Set HBR and RBR LPF coefficients */
4975
	/* Set HBR and RBR LPF coefficients */
4540
	if (crtc->config.port_clock == 162000 ||
4976
	if (crtc->config.port_clock == 162000 ||
4541
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
4977
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
4542
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
4978
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
4543
		vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
4979
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
4544
				 0x009f0003);
4980
				 0x009f0003);
4545
	else
4981
	else
4546
		vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
4982
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
4547
				 0x00d0000f);
4983
				 0x00d0000f);
4548
 
4984
 
4549
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
4985
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
4550
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
4986
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
4551
		/* Use SSC source */
4987
		/* Use SSC source */
4552
		if (!pipe)
4988
		if (!pipe)
4553
			vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
4989
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
4554
					 0x0df40000);
4990
					 0x0df40000);
4555
		else
4991
		else
4556
			vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
4992
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
4557
					 0x0df70000);
4993
					 0x0df70000);
4558
	} else { /* HDMI or VGA */
4994
	} else { /* HDMI or VGA */
4559
		/* Use bend source */
4995
		/* Use bend source */
4560
		if (!pipe)
4996
		if (!pipe)
4561
			vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
4997
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
4562
					 0x0df70000);
4998
					 0x0df70000);
4563
		else
4999
		else
4564
			vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
5000
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
4565
					 0x0df40000);
5001
					 0x0df40000);
4566
	}
5002
	}
4567
 
5003
 
4568
	coreclk = vlv_dpio_read(dev_priv, DPIO_CORE_CLK(pipe));
5004
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
4569
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
5005
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
4570
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
5006
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
4571
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
5007
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
4572
		coreclk |= 0x01000000;
5008
		coreclk |= 0x01000000;
4573
	vlv_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), coreclk);
5009
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
4574
 
5010
 
-
 
5011
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
4575
	vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000);
5012
 
-
 
5013
	/*
-
 
5014
	 * Enable DPIO clock input. We should never disable the reference
-
 
5015
	 * clock for pipe B, since VGA hotplug / manual detection depends
4576
 
5016
	 * on it.
4577
	/* Enable DPIO clock input */
5017
	 */
4578
	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
5018
	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
4579
		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
5019
		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
4580
	/* We should never disable this, set it here for state tracking */
5020
	/* We should never disable this, set it here for state tracking */
4581
	if (pipe == PIPE_B)
5021
	if (pipe == PIPE_B)
4582
		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
5022
		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
4583
	dpll |= DPLL_VCO_ENABLE;
5023
	dpll |= DPLL_VCO_ENABLE;
4584
	crtc->config.dpll_hw_state.dpll = dpll;
5024
	crtc->config.dpll_hw_state.dpll = dpll;
4585
 
5025
 
4586
	dpll_md = (crtc->config.pixel_multiplier - 1)
5026
	dpll_md = (crtc->config.pixel_multiplier - 1)
4587
		<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
5027
		<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
4588
	crtc->config.dpll_hw_state.dpll_md = dpll_md;
5028
	crtc->config.dpll_hw_state.dpll_md = dpll_md;
4589
 
5029
 
4590
	if (crtc->config.has_dp_encoder)
5030
	if (crtc->config.has_dp_encoder)
4591
		intel_dp_set_m_n(crtc);
5031
		intel_dp_set_m_n(crtc);
4592
 
5032
 
4593
	mutex_unlock(&dev_priv->dpio_lock);
5033
	mutex_unlock(&dev_priv->dpio_lock);
4594
}
5034
}
4595
 
5035
 
4596
static void i9xx_update_pll(struct intel_crtc *crtc,
5036
static void i9xx_update_pll(struct intel_crtc *crtc,
4597
			    intel_clock_t *reduced_clock,
5037
			    intel_clock_t *reduced_clock,
4598
			    int num_connectors)
5038
			    int num_connectors)
4599
{
5039
{
4600
	struct drm_device *dev = crtc->base.dev;
5040
	struct drm_device *dev = crtc->base.dev;
4601
	struct drm_i915_private *dev_priv = dev->dev_private;
5041
	struct drm_i915_private *dev_priv = dev->dev_private;
4602
	u32 dpll;
5042
	u32 dpll;
4603
	bool is_sdvo;
5043
	bool is_sdvo;
4604
	struct dpll *clock = &crtc->config.dpll;
5044
	struct dpll *clock = &crtc->config.dpll;
4605
 
5045
 
4606
	i9xx_update_pll_dividers(crtc, reduced_clock);
5046
	i9xx_update_pll_dividers(crtc, reduced_clock);
4607
 
5047
 
4608
	is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
5048
	is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
4609
		intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
5049
		intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
4610
 
5050
 
4611
	dpll = DPLL_VGA_MODE_DIS;
5051
	dpll = DPLL_VGA_MODE_DIS;
4612
 
5052
 
4613
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS))
5053
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS))
4614
		dpll |= DPLLB_MODE_LVDS;
5054
		dpll |= DPLLB_MODE_LVDS;
4615
	else
5055
	else
4616
		dpll |= DPLLB_MODE_DAC_SERIAL;
5056
		dpll |= DPLLB_MODE_DAC_SERIAL;
4617
 
5057
 
4618
	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
5058
	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
4619
			dpll |= (crtc->config.pixel_multiplier - 1)
5059
			dpll |= (crtc->config.pixel_multiplier - 1)
4620
				<< SDVO_MULTIPLIER_SHIFT_HIRES;
5060
				<< SDVO_MULTIPLIER_SHIFT_HIRES;
4621
		}
5061
		}
4622
 
5062
 
4623
	if (is_sdvo)
5063
	if (is_sdvo)
4624
		dpll |= DPLL_SDVO_HIGH_SPEED;
5064
		dpll |= DPLL_SDVO_HIGH_SPEED;
4625
 
5065
 
4626
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
5066
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
4627
		dpll |= DPLL_SDVO_HIGH_SPEED;
5067
		dpll |= DPLL_SDVO_HIGH_SPEED;
4628
 
5068
 
4629
	/* compute bitmask from p1 value */
5069
	/* compute bitmask from p1 value */
4630
	if (IS_PINEVIEW(dev))
5070
	if (IS_PINEVIEW(dev))
4631
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
5071
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
4632
	else {
5072
	else {
4633
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5073
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4634
		if (IS_G4X(dev) && reduced_clock)
5074
		if (IS_G4X(dev) && reduced_clock)
4635
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5075
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
4636
	}
5076
	}
4637
	switch (clock->p2) {
5077
	switch (clock->p2) {
4638
	case 5:
5078
	case 5:
4639
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5079
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
4640
		break;
5080
		break;
4641
	case 7:
5081
	case 7:
4642
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5082
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
4643
		break;
5083
		break;
4644
	case 10:
5084
	case 10:
4645
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5085
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
4646
		break;
5086
		break;
4647
	case 14:
5087
	case 14:
4648
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5088
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
4649
		break;
5089
		break;
4650
	}
5090
	}
4651
	if (INTEL_INFO(dev)->gen >= 4)
5091
	if (INTEL_INFO(dev)->gen >= 4)
4652
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
5092
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
4653
 
5093
 
4654
	if (crtc->config.sdvo_tv_clock)
5094
	if (crtc->config.sdvo_tv_clock)
4655
		dpll |= PLL_REF_INPUT_TVCLKINBC;
5095
		dpll |= PLL_REF_INPUT_TVCLKINBC;
4656
	else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
5096
	else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
4657
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5097
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4658
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5098
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4659
	else
5099
	else
4660
		dpll |= PLL_REF_INPUT_DREFCLK;
5100
		dpll |= PLL_REF_INPUT_DREFCLK;
4661
 
5101
 
4662
	dpll |= DPLL_VCO_ENABLE;
5102
	dpll |= DPLL_VCO_ENABLE;
4663
	crtc->config.dpll_hw_state.dpll = dpll;
5103
	crtc->config.dpll_hw_state.dpll = dpll;
4664
 
5104
 
4665
	if (INTEL_INFO(dev)->gen >= 4) {
5105
	if (INTEL_INFO(dev)->gen >= 4) {
4666
		u32 dpll_md = (crtc->config.pixel_multiplier - 1)
5106
		u32 dpll_md = (crtc->config.pixel_multiplier - 1)
4667
					<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
5107
					<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
4668
		crtc->config.dpll_hw_state.dpll_md = dpll_md;
5108
		crtc->config.dpll_hw_state.dpll_md = dpll_md;
4669
	}
5109
	}
4670
 
5110
 
4671
	if (crtc->config.has_dp_encoder)
5111
	if (crtc->config.has_dp_encoder)
4672
		intel_dp_set_m_n(crtc);
5112
		intel_dp_set_m_n(crtc);
4673
}
5113
}
4674
 
5114
 
4675
static void i8xx_update_pll(struct intel_crtc *crtc,
5115
static void i8xx_update_pll(struct intel_crtc *crtc,
4676
			    intel_clock_t *reduced_clock,
5116
			    intel_clock_t *reduced_clock,
4677
			    int num_connectors)
5117
			    int num_connectors)
4678
{
5118
{
4679
	struct drm_device *dev = crtc->base.dev;
5119
	struct drm_device *dev = crtc->base.dev;
4680
	struct drm_i915_private *dev_priv = dev->dev_private;
5120
	struct drm_i915_private *dev_priv = dev->dev_private;
4681
	u32 dpll;
5121
	u32 dpll;
4682
	struct dpll *clock = &crtc->config.dpll;
5122
	struct dpll *clock = &crtc->config.dpll;
4683
 
5123
 
4684
	i9xx_update_pll_dividers(crtc, reduced_clock);
5124
	i9xx_update_pll_dividers(crtc, reduced_clock);
4685
 
5125
 
4686
	dpll = DPLL_VGA_MODE_DIS;
5126
	dpll = DPLL_VGA_MODE_DIS;
4687
 
5127
 
4688
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) {
5128
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) {
4689
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5129
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4690
	} else {
5130
	} else {
4691
		if (clock->p1 == 2)
5131
		if (clock->p1 == 2)
4692
			dpll |= PLL_P1_DIVIDE_BY_TWO;
5132
			dpll |= PLL_P1_DIVIDE_BY_TWO;
4693
		else
5133
		else
4694
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5134
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4695
		if (clock->p2 == 4)
5135
		if (clock->p2 == 4)
4696
			dpll |= PLL_P2_DIVIDE_BY_4;
5136
			dpll |= PLL_P2_DIVIDE_BY_4;
4697
	}
5137
	}
4698
 
5138
 
4699
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
5139
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
4700
		dpll |= DPLL_DVO_2X_MODE;
5140
		dpll |= DPLL_DVO_2X_MODE;
4701
 
5141
 
4702
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
5142
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
4703
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5143
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4704
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5144
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4705
	else
5145
	else
4706
		dpll |= PLL_REF_INPUT_DREFCLK;
5146
		dpll |= PLL_REF_INPUT_DREFCLK;
4707
 
5147
 
4708
	dpll |= DPLL_VCO_ENABLE;
5148
	dpll |= DPLL_VCO_ENABLE;
4709
	crtc->config.dpll_hw_state.dpll = dpll;
5149
	crtc->config.dpll_hw_state.dpll = dpll;
4710
}
5150
}
4711
 
5151
 
4712
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
5152
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
4713
{
5153
{
4714
	struct drm_device *dev = intel_crtc->base.dev;
5154
	struct drm_device *dev = intel_crtc->base.dev;
4715
	struct drm_i915_private *dev_priv = dev->dev_private;
5155
	struct drm_i915_private *dev_priv = dev->dev_private;
4716
	enum pipe pipe = intel_crtc->pipe;
5156
	enum pipe pipe = intel_crtc->pipe;
4717
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
5157
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
4718
	struct drm_display_mode *adjusted_mode =
5158
	struct drm_display_mode *adjusted_mode =
4719
		&intel_crtc->config.adjusted_mode;
5159
		&intel_crtc->config.adjusted_mode;
4720
	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
-
 
4721
	uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end;
5160
	uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end;
4722
 
5161
 
4723
	/* We need to be careful not to changed the adjusted mode, for otherwise
5162
	/* We need to be careful not to changed the adjusted mode, for otherwise
4724
	 * the hw state checker will get angry at the mismatch. */
5163
	 * the hw state checker will get angry at the mismatch. */
4725
	crtc_vtotal = adjusted_mode->crtc_vtotal;
5164
	crtc_vtotal = adjusted_mode->crtc_vtotal;
4726
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;
5165
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;
4727
 
5166
 
4728
	if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5167
	if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
4729
		/* the chip adds 2 halflines automatically */
5168
		/* the chip adds 2 halflines automatically */
4730
		crtc_vtotal -= 1;
5169
		crtc_vtotal -= 1;
4731
		crtc_vblank_end -= 1;
5170
		crtc_vblank_end -= 1;
4732
		vsyncshift = adjusted_mode->crtc_hsync_start
5171
		vsyncshift = adjusted_mode->crtc_hsync_start
4733
			     - adjusted_mode->crtc_htotal / 2;
5172
			     - adjusted_mode->crtc_htotal / 2;
4734
	} else {
5173
	} else {
4735
		vsyncshift = 0;
5174
		vsyncshift = 0;
4736
	}
5175
	}
4737
 
5176
 
4738
	if (INTEL_INFO(dev)->gen > 3)
5177
	if (INTEL_INFO(dev)->gen > 3)
4739
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
5178
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
4740
 
5179
 
4741
	I915_WRITE(HTOTAL(cpu_transcoder),
5180
	I915_WRITE(HTOTAL(cpu_transcoder),
4742
		   (adjusted_mode->crtc_hdisplay - 1) |
5181
		   (adjusted_mode->crtc_hdisplay - 1) |
4743
		   ((adjusted_mode->crtc_htotal - 1) << 16));
5182
		   ((adjusted_mode->crtc_htotal - 1) << 16));
4744
	I915_WRITE(HBLANK(cpu_transcoder),
5183
	I915_WRITE(HBLANK(cpu_transcoder),
4745
		   (adjusted_mode->crtc_hblank_start - 1) |
5184
		   (adjusted_mode->crtc_hblank_start - 1) |
4746
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
5185
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
4747
	I915_WRITE(HSYNC(cpu_transcoder),
5186
	I915_WRITE(HSYNC(cpu_transcoder),
4748
		   (adjusted_mode->crtc_hsync_start - 1) |
5187
		   (adjusted_mode->crtc_hsync_start - 1) |
4749
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
5188
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
4750
 
5189
 
4751
	I915_WRITE(VTOTAL(cpu_transcoder),
5190
	I915_WRITE(VTOTAL(cpu_transcoder),
4752
		   (adjusted_mode->crtc_vdisplay - 1) |
5191
		   (adjusted_mode->crtc_vdisplay - 1) |
4753
		   ((crtc_vtotal - 1) << 16));
5192
		   ((crtc_vtotal - 1) << 16));
4754
	I915_WRITE(VBLANK(cpu_transcoder),
5193
	I915_WRITE(VBLANK(cpu_transcoder),
4755
		   (adjusted_mode->crtc_vblank_start - 1) |
5194
		   (adjusted_mode->crtc_vblank_start - 1) |
4756
		   ((crtc_vblank_end - 1) << 16));
5195
		   ((crtc_vblank_end - 1) << 16));
4757
	I915_WRITE(VSYNC(cpu_transcoder),
5196
	I915_WRITE(VSYNC(cpu_transcoder),
4758
		   (adjusted_mode->crtc_vsync_start - 1) |
5197
		   (adjusted_mode->crtc_vsync_start - 1) |
4759
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
5198
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
4760
 
5199
 
4761
	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
5200
	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
4762
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
5201
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
4763
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
5202
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
4764
	 * bits. */
5203
	 * bits. */
4765
	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
5204
	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
4766
	    (pipe == PIPE_B || pipe == PIPE_C))
5205
	    (pipe == PIPE_B || pipe == PIPE_C))
4767
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
5206
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
4768
 
5207
 
4769
	/* pipesrc controls the size that is scaled from, which should
5208
	/* pipesrc controls the size that is scaled from, which should
4770
	 * always be the user's requested size.
5209
	 * always be the user's requested size.
4771
	 */
5210
	 */
4772
	I915_WRITE(PIPESRC(pipe),
5211
	I915_WRITE(PIPESRC(pipe),
-
 
5212
		   ((intel_crtc->config.pipe_src_w - 1) << 16) |
4773
		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
5213
		   (intel_crtc->config.pipe_src_h - 1));
4774
}
5214
}
4775
 
5215
 
4776
static void intel_get_pipe_timings(struct intel_crtc *crtc,
5216
static void intel_get_pipe_timings(struct intel_crtc *crtc,
4777
				   struct intel_crtc_config *pipe_config)
5217
				   struct intel_crtc_config *pipe_config)
4778
{
5218
{
4779
	struct drm_device *dev = crtc->base.dev;
5219
	struct drm_device *dev = crtc->base.dev;
4780
	struct drm_i915_private *dev_priv = dev->dev_private;
5220
	struct drm_i915_private *dev_priv = dev->dev_private;
4781
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
5221
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
4782
	uint32_t tmp;
5222
	uint32_t tmp;
4783
 
5223
 
4784
	tmp = I915_READ(HTOTAL(cpu_transcoder));
5224
	tmp = I915_READ(HTOTAL(cpu_transcoder));
4785
	pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
5225
	pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
4786
	pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
5226
	pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
4787
	tmp = I915_READ(HBLANK(cpu_transcoder));
5227
	tmp = I915_READ(HBLANK(cpu_transcoder));
4788
	pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
5228
	pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
4789
	pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
5229
	pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
4790
	tmp = I915_READ(HSYNC(cpu_transcoder));
5230
	tmp = I915_READ(HSYNC(cpu_transcoder));
4791
	pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
5231
	pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
4792
	pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
5232
	pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
4793
 
5233
 
4794
	tmp = I915_READ(VTOTAL(cpu_transcoder));
5234
	tmp = I915_READ(VTOTAL(cpu_transcoder));
4795
	pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
5235
	pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
4796
	pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
5236
	pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
4797
	tmp = I915_READ(VBLANK(cpu_transcoder));
5237
	tmp = I915_READ(VBLANK(cpu_transcoder));
4798
	pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
5238
	pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
4799
	pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
5239
	pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
4800
	tmp = I915_READ(VSYNC(cpu_transcoder));
5240
	tmp = I915_READ(VSYNC(cpu_transcoder));
4801
	pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
5241
	pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
4802
	pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
5242
	pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
4803
 
5243
 
4804
	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
5244
	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
4805
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
5245
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
4806
		pipe_config->adjusted_mode.crtc_vtotal += 1;
5246
		pipe_config->adjusted_mode.crtc_vtotal += 1;
4807
		pipe_config->adjusted_mode.crtc_vblank_end += 1;
5247
		pipe_config->adjusted_mode.crtc_vblank_end += 1;
4808
	}
5248
	}
4809
 
5249
 
4810
	tmp = I915_READ(PIPESRC(crtc->pipe));
5250
	tmp = I915_READ(PIPESRC(crtc->pipe));
4811
	pipe_config->requested_mode.vdisplay = (tmp & 0xffff) + 1;
5251
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
4812
	pipe_config->requested_mode.hdisplay = ((tmp >> 16) & 0xffff) + 1;
5252
	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
-
 
5253
 
-
 
5254
	pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h;
-
 
5255
	pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
4813
}
5256
}
4814
 
5257
 
4815
static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
5258
static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
4816
					     struct intel_crtc_config *pipe_config)
5259
					     struct intel_crtc_config *pipe_config)
4817
{
5260
{
4818
	struct drm_crtc *crtc = &intel_crtc->base;
5261
	struct drm_crtc *crtc = &intel_crtc->base;
4819
 
5262
 
4820
	crtc->mode.hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
5263
	crtc->mode.hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
4821
	crtc->mode.htotal = pipe_config->adjusted_mode.crtc_htotal;
5264
	crtc->mode.htotal = pipe_config->adjusted_mode.crtc_htotal;
4822
	crtc->mode.hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
5265
	crtc->mode.hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
4823
	crtc->mode.hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
5266
	crtc->mode.hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
4824
 
5267
 
4825
	crtc->mode.vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
5268
	crtc->mode.vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
4826
	crtc->mode.vtotal = pipe_config->adjusted_mode.crtc_vtotal;
5269
	crtc->mode.vtotal = pipe_config->adjusted_mode.crtc_vtotal;
4827
	crtc->mode.vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
5270
	crtc->mode.vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
4828
	crtc->mode.vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
5271
	crtc->mode.vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
4829
 
5272
 
4830
	crtc->mode.flags = pipe_config->adjusted_mode.flags;
5273
	crtc->mode.flags = pipe_config->adjusted_mode.flags;
4831
 
5274
 
4832
	crtc->mode.clock = pipe_config->adjusted_mode.clock;
5275
	crtc->mode.clock = pipe_config->adjusted_mode.crtc_clock;
4833
	crtc->mode.flags |= pipe_config->adjusted_mode.flags;
5276
	crtc->mode.flags |= pipe_config->adjusted_mode.flags;
4834
}
5277
}
4835
 
5278
 
4836
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
5279
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
4837
{
5280
{
4838
	struct drm_device *dev = intel_crtc->base.dev;
5281
	struct drm_device *dev = intel_crtc->base.dev;
4839
	struct drm_i915_private *dev_priv = dev->dev_private;
5282
	struct drm_i915_private *dev_priv = dev->dev_private;
4840
	uint32_t pipeconf;
5283
	uint32_t pipeconf;
4841
 
5284
 
4842
	pipeconf = 0;
5285
	pipeconf = 0;
4843
 
5286
 
4844
	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
5287
	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
4845
	    I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
5288
	    I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
4846
		pipeconf |= PIPECONF_ENABLE;
5289
		pipeconf |= PIPECONF_ENABLE;
4847
 
-
 
4848
	if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) {
-
 
4849
		/* Enable pixel doubling when the dot clock is > 90% of the (display)
-
 
4850
		 * core speed.
-
 
4851
		 *
-
 
4852
		 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
-
 
4853
		 * pipe == 0 check?
-
 
4854
		 */
5290
 
4855
		if (intel_crtc->config.requested_mode.clock >
-
 
4856
		    dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
5291
	if (intel_crtc->config.double_wide)
4857
			pipeconf |= PIPECONF_DOUBLE_WIDE;
-
 
4858
	}
5292
			pipeconf |= PIPECONF_DOUBLE_WIDE;
4859
 
5293
 
4860
	/* only g4x and later have fancy bpc/dither controls */
5294
	/* only g4x and later have fancy bpc/dither controls */
4861
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
5295
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
4862
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
5296
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
4863
		if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30)
5297
		if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30)
4864
			pipeconf |= PIPECONF_DITHER_EN |
5298
			pipeconf |= PIPECONF_DITHER_EN |
4865
				    PIPECONF_DITHER_TYPE_SP;
5299
				    PIPECONF_DITHER_TYPE_SP;
4866
 
5300
 
4867
		switch (intel_crtc->config.pipe_bpp) {
5301
		switch (intel_crtc->config.pipe_bpp) {
4868
		case 18:
5302
		case 18:
4869
			pipeconf |= PIPECONF_6BPC;
5303
			pipeconf |= PIPECONF_6BPC;
4870
			break;
5304
			break;
4871
		case 24:
5305
		case 24:
4872
			pipeconf |= PIPECONF_8BPC;
5306
			pipeconf |= PIPECONF_8BPC;
4873
			break;
5307
			break;
4874
		case 30:
5308
		case 30:
4875
			pipeconf |= PIPECONF_10BPC;
5309
			pipeconf |= PIPECONF_10BPC;
4876
			break;
5310
			break;
4877
		default:
5311
		default:
4878
			/* Case prevented by intel_choose_pipe_bpp_dither. */
5312
			/* Case prevented by intel_choose_pipe_bpp_dither. */
4879
			BUG();
5313
			BUG();
4880
		}
5314
		}
4881
	}
5315
	}
4882
 
5316
 
4883
	if (HAS_PIPE_CXSR(dev)) {
5317
	if (HAS_PIPE_CXSR(dev)) {
4884
		if (intel_crtc->lowfreq_avail) {
5318
		if (intel_crtc->lowfreq_avail) {
4885
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
5319
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
4886
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
5320
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
4887
		} else {
5321
		} else {
4888
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
5322
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
4889
		}
5323
		}
4890
	}
5324
	}
4891
 
5325
 
4892
	if (!IS_GEN2(dev) &&
5326
	if (!IS_GEN2(dev) &&
4893
	    intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
5327
	    intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
4894
		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
5328
		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
4895
	else
5329
	else
4896
		pipeconf |= PIPECONF_PROGRESSIVE;
5330
		pipeconf |= PIPECONF_PROGRESSIVE;
4897
 
5331
 
4898
	if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range)
5332
	if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range)
4899
			pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
5333
			pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
4900
 
5334
 
4901
	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
5335
	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
4902
	POSTING_READ(PIPECONF(intel_crtc->pipe));
5336
	POSTING_READ(PIPECONF(intel_crtc->pipe));
4903
}
5337
}
4904
 
5338
 
4905
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5339
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4906
			      int x, int y,
5340
			      int x, int y,
4907
			      struct drm_framebuffer *fb)
5341
			      struct drm_framebuffer *fb)
4908
{
5342
{
4909
	struct drm_device *dev = crtc->dev;
5343
	struct drm_device *dev = crtc->dev;
4910
	struct drm_i915_private *dev_priv = dev->dev_private;
5344
	struct drm_i915_private *dev_priv = dev->dev_private;
4911
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5345
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4912
	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
-
 
4913
	int pipe = intel_crtc->pipe;
5346
	int pipe = intel_crtc->pipe;
4914
	int plane = intel_crtc->plane;
5347
	int plane = intel_crtc->plane;
4915
	int refclk, num_connectors = 0;
5348
	int refclk, num_connectors = 0;
4916
	intel_clock_t clock, reduced_clock;
5349
	intel_clock_t clock, reduced_clock;
4917
	u32 dspcntr;
5350
	u32 dspcntr;
4918
	bool ok, has_reduced_clock = false;
5351
	bool ok, has_reduced_clock = false;
4919
	bool is_lvds = false;
5352
	bool is_lvds = false, is_dsi = false;
4920
	struct intel_encoder *encoder;
5353
	struct intel_encoder *encoder;
4921
	const intel_limit_t *limit;
5354
	const intel_limit_t *limit;
4922
	int ret;
5355
	int ret;
4923
 
5356
 
4924
	for_each_encoder_on_crtc(dev, crtc, encoder) {
5357
	for_each_encoder_on_crtc(dev, crtc, encoder) {
4925
		switch (encoder->type) {
5358
		switch (encoder->type) {
4926
		case INTEL_OUTPUT_LVDS:
5359
		case INTEL_OUTPUT_LVDS:
4927
			is_lvds = true;
5360
			is_lvds = true;
4928
			break;
5361
			break;
-
 
5362
		case INTEL_OUTPUT_DSI:
-
 
5363
			is_dsi = true;
-
 
5364
			break;
4929
		}
5365
		}
4930
 
5366
 
4931
		num_connectors++;
5367
		num_connectors++;
4932
	}
5368
	}
-
 
5369
 
-
 
5370
	if (is_dsi)
-
 
5371
		goto skip_dpll;
-
 
5372
 
4933
 
5373
	if (!intel_crtc->config.clock_set) {
4934
	refclk = i9xx_get_refclk(crtc, num_connectors);
5374
	refclk = i9xx_get_refclk(crtc, num_connectors);
4935
 
5375
 
4936
	/*
5376
	/*
4937
	 * Returns a set of divisors for the desired target clock with the given
5377
		 * Returns a set of divisors for the desired target clock with
4938
	 * refclk, or FALSE.  The returned values represent the clock equation:
5378
		 * the given refclk, or FALSE.  The returned values represent
4939
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
5379
		 * the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n +
-
 
5380
		 * 2) / p1 / p2.
4940
	 */
5381
	 */
4941
	limit = intel_limit(crtc, refclk);
5382
	limit = intel_limit(crtc, refclk);
4942
	ok = dev_priv->display.find_dpll(limit, crtc,
5383
	ok = dev_priv->display.find_dpll(limit, crtc,
4943
					 intel_crtc->config.port_clock,
5384
					 intel_crtc->config.port_clock,
4944
					 refclk, NULL, &clock);
5385
					 refclk, NULL, &clock);
4945
	if (!ok && !intel_crtc->config.clock_set) {
5386
		if (!ok) {
4946
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
5387
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
4947
		return -EINVAL;
5388
		return -EINVAL;
4948
	}
5389
	}
4949
 
5390
 
4950
	if (is_lvds && dev_priv->lvds_downclock_avail) {
5391
	if (is_lvds && dev_priv->lvds_downclock_avail) {
4951
		/*
5392
		/*
4952
		 * Ensure we match the reduced clock's P to the target clock.
5393
			 * Ensure we match the reduced clock's P to the target
4953
		 * If the clocks don't match, we can't switch the display clock
5394
			 * clock.  If the clocks don't match, we can't switch
4954
		 * by using the FP0/FP1. In such case we will disable the LVDS
5395
			 * the display clock by using the FP0/FP1. In such case
4955
		 * downclock feature.
5396
			 * we will disable the LVDS downclock feature.
4956
		*/
5397
		*/
4957
		has_reduced_clock =
5398
		has_reduced_clock =
4958
			dev_priv->display.find_dpll(limit, crtc,
5399
			dev_priv->display.find_dpll(limit, crtc,
4959
						    dev_priv->lvds_downclock,
5400
						    dev_priv->lvds_downclock,
4960
						    refclk, &clock,
5401
						    refclk, &clock,
4961
						    &reduced_clock);
5402
						    &reduced_clock);
4962
	}
5403
	}
4963
	/* Compat-code for transition, will disappear. */
5404
	/* Compat-code for transition, will disappear. */
4964
	if (!intel_crtc->config.clock_set) {
-
 
4965
		intel_crtc->config.dpll.n = clock.n;
5405
		intel_crtc->config.dpll.n = clock.n;
4966
		intel_crtc->config.dpll.m1 = clock.m1;
5406
		intel_crtc->config.dpll.m1 = clock.m1;
4967
		intel_crtc->config.dpll.m2 = clock.m2;
5407
		intel_crtc->config.dpll.m2 = clock.m2;
4968
		intel_crtc->config.dpll.p1 = clock.p1;
5408
		intel_crtc->config.dpll.p1 = clock.p1;
4969
		intel_crtc->config.dpll.p2 = clock.p2;
5409
		intel_crtc->config.dpll.p2 = clock.p2;
4970
	}
5410
	}
4971
 
5411
 
4972
	if (IS_GEN2(dev))
5412
	if (IS_GEN2(dev)) {
4973
		i8xx_update_pll(intel_crtc,
5413
		i8xx_update_pll(intel_crtc,
4974
				has_reduced_clock ? &reduced_clock : NULL,
5414
				has_reduced_clock ? &reduced_clock : NULL,
4975
				num_connectors);
5415
				num_connectors);
4976
	else if (IS_VALLEYVIEW(dev))
5416
	} else if (IS_VALLEYVIEW(dev)) {
4977
		vlv_update_pll(intel_crtc);
5417
		vlv_update_pll(intel_crtc);
4978
	else
5418
	} else {
4979
		i9xx_update_pll(intel_crtc,
5419
		i9xx_update_pll(intel_crtc,
4980
				has_reduced_clock ? &reduced_clock : NULL,
5420
				has_reduced_clock ? &reduced_clock : NULL,
4981
				num_connectors);
5421
				num_connectors);
-
 
5422
	}
-
 
5423
 
4982
 
5424
skip_dpll:
4983
	/* Set up the display plane register */
5425
	/* Set up the display plane register */
4984
	dspcntr = DISPPLANE_GAMMA_ENABLE;
5426
	dspcntr = DISPPLANE_GAMMA_ENABLE;
4985
 
5427
 
4986
	if (!IS_VALLEYVIEW(dev)) {
5428
	if (!IS_VALLEYVIEW(dev)) {
4987
	if (pipe == 0)
5429
	if (pipe == 0)
4988
		dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
5430
		dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
4989
	else
5431
	else
4990
		dspcntr |= DISPPLANE_SEL_PIPE_B;
5432
		dspcntr |= DISPPLANE_SEL_PIPE_B;
4991
	}
5433
	}
4992
 
5434
 
4993
	intel_set_pipe_timings(intel_crtc);
5435
	intel_set_pipe_timings(intel_crtc);
4994
 
5436
 
4995
	/* pipesrc and dspsize control the size that is scaled from,
5437
	/* pipesrc and dspsize control the size that is scaled from,
4996
	 * which should always be the user's requested size.
5438
	 * which should always be the user's requested size.
4997
	 */
5439
	 */
4998
	I915_WRITE(DSPSIZE(plane),
5440
	I915_WRITE(DSPSIZE(plane),
4999
		   ((mode->vdisplay - 1) << 16) |
5441
		   ((intel_crtc->config.pipe_src_h - 1) << 16) |
5000
		   (mode->hdisplay - 1));
5442
		   (intel_crtc->config.pipe_src_w - 1));
5001
	I915_WRITE(DSPPOS(plane), 0);
5443
	I915_WRITE(DSPPOS(plane), 0);
5002
 
5444
 
5003
	i9xx_set_pipeconf(intel_crtc);
5445
	i9xx_set_pipeconf(intel_crtc);
5004
 
5446
 
5005
	I915_WRITE(DSPCNTR(plane), dspcntr);
5447
	I915_WRITE(DSPCNTR(plane), dspcntr);
5006
	POSTING_READ(DSPCNTR(plane));
5448
	POSTING_READ(DSPCNTR(plane));
5007
 
5449
 
5008
	ret = intel_pipe_set_base(crtc, x, y, fb);
5450
	ret = intel_pipe_set_base(crtc, x, y, fb);
5009
 
-
 
5010
	intel_update_watermarks(dev);
-
 
5011
 
5451
 
5012
    return ret;
5452
    return ret;
5013
}
5453
}
5014
 
5454
 
5015
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
5455
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
5016
				 struct intel_crtc_config *pipe_config)
5456
				 struct intel_crtc_config *pipe_config)
5017
{
5457
{
5018
	struct drm_device *dev = crtc->base.dev;
5458
	struct drm_device *dev = crtc->base.dev;
5019
	struct drm_i915_private *dev_priv = dev->dev_private;
5459
	struct drm_i915_private *dev_priv = dev->dev_private;
5020
	uint32_t tmp;
5460
	uint32_t tmp;
-
 
5461
 
-
 
5462
	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
-
 
5463
		return;
5021
 
5464
 
5022
	tmp = I915_READ(PFIT_CONTROL);
5465
	tmp = I915_READ(PFIT_CONTROL);
5023
	if (!(tmp & PFIT_ENABLE))
5466
	if (!(tmp & PFIT_ENABLE))
5024
		return;
5467
		return;
5025
 
5468
 
5026
	/* Check whether the pfit is attached to our pipe. */
5469
	/* Check whether the pfit is attached to our pipe. */
5027
	if (INTEL_INFO(dev)->gen < 4) {
5470
	if (INTEL_INFO(dev)->gen < 4) {
5028
		if (crtc->pipe != PIPE_B)
5471
		if (crtc->pipe != PIPE_B)
5029
			return;
5472
			return;
5030
	} else {
5473
	} else {
5031
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
5474
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
5032
			return;
5475
			return;
5033
	}
5476
	}
5034
 
5477
 
5035
	pipe_config->gmch_pfit.control = tmp;
5478
	pipe_config->gmch_pfit.control = tmp;
5036
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
5479
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
5037
	if (INTEL_INFO(dev)->gen < 5)
5480
	if (INTEL_INFO(dev)->gen < 5)
5038
		pipe_config->gmch_pfit.lvds_border_bits =
5481
		pipe_config->gmch_pfit.lvds_border_bits =
5039
			I915_READ(LVDS) & LVDS_BORDER_ENABLE;
5482
			I915_READ(LVDS) & LVDS_BORDER_ENABLE;
5040
}
5483
}
5041
 
5484
 
5042
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
5485
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
5043
			       struct intel_crtc_config *pipe_config)
5486
			       struct intel_crtc_config *pipe_config)
5044
{
5487
{
5045
	struct drm_device *dev = crtc->base.dev;
5488
	struct drm_device *dev = crtc->base.dev;
5046
	struct drm_i915_private *dev_priv = dev->dev_private;
5489
	struct drm_i915_private *dev_priv = dev->dev_private;
5047
	int pipe = pipe_config->cpu_transcoder;
5490
	int pipe = pipe_config->cpu_transcoder;
5048
	intel_clock_t clock;
5491
	intel_clock_t clock;
5049
	u32 mdiv;
5492
	u32 mdiv;
5050
	int refclk = 100000;
5493
	int refclk = 100000;
5051
 
5494
 
5052
	mutex_lock(&dev_priv->dpio_lock);
5495
	mutex_lock(&dev_priv->dpio_lock);
5053
	mdiv = vlv_dpio_read(dev_priv, DPIO_DIV(pipe));
5496
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
5054
	mutex_unlock(&dev_priv->dpio_lock);
5497
	mutex_unlock(&dev_priv->dpio_lock);
5055
 
5498
 
5056
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
5499
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
5057
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
5500
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
5058
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
5501
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
5059
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
5502
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
5060
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
5503
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
5061
 
-
 
5062
	clock.vco = refclk * clock.m1 * clock.m2 / clock.n;
5504
 
-
 
5505
	vlv_clock(refclk, &clock);
5063
	clock.dot = 2 * clock.vco / (clock.p1 * clock.p2);
5506
 
5064
 
5507
	/* clock.dot is the fast clock */
5065
	pipe_config->adjusted_mode.clock = clock.dot / 10;
5508
	pipe_config->port_clock = clock.dot / 5;
5066
}
5509
}
5067
 
5510
 
5068
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
5511
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
5069
				 struct intel_crtc_config *pipe_config)
5512
				 struct intel_crtc_config *pipe_config)
5070
{
5513
{
5071
	struct drm_device *dev = crtc->base.dev;
5514
	struct drm_device *dev = crtc->base.dev;
5072
	struct drm_i915_private *dev_priv = dev->dev_private;
5515
	struct drm_i915_private *dev_priv = dev->dev_private;
5073
	uint32_t tmp;
5516
	uint32_t tmp;
5074
 
5517
 
5075
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
5518
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
5076
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
5519
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
5077
 
5520
 
5078
	tmp = I915_READ(PIPECONF(crtc->pipe));
5521
	tmp = I915_READ(PIPECONF(crtc->pipe));
5079
	if (!(tmp & PIPECONF_ENABLE))
5522
	if (!(tmp & PIPECONF_ENABLE))
5080
		return false;
5523
		return false;
5081
 
5524
 
5082
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
5525
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
5083
		switch (tmp & PIPECONF_BPC_MASK) {
5526
		switch (tmp & PIPECONF_BPC_MASK) {
5084
		case PIPECONF_6BPC:
5527
		case PIPECONF_6BPC:
5085
			pipe_config->pipe_bpp = 18;
5528
			pipe_config->pipe_bpp = 18;
5086
			break;
5529
			break;
5087
		case PIPECONF_8BPC:
5530
		case PIPECONF_8BPC:
5088
			pipe_config->pipe_bpp = 24;
5531
			pipe_config->pipe_bpp = 24;
5089
			break;
5532
			break;
5090
		case PIPECONF_10BPC:
5533
		case PIPECONF_10BPC:
5091
			pipe_config->pipe_bpp = 30;
5534
			pipe_config->pipe_bpp = 30;
5092
			break;
5535
			break;
5093
		default:
5536
		default:
5094
			break;
5537
			break;
5095
		}
5538
		}
5096
	}
5539
	}
-
 
5540
 
-
 
5541
	if (INTEL_INFO(dev)->gen < 4)
-
 
5542
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
5097
 
5543
 
5098
	intel_get_pipe_timings(crtc, pipe_config);
5544
	intel_get_pipe_timings(crtc, pipe_config);
5099
 
5545
 
5100
	i9xx_get_pfit_config(crtc, pipe_config);
5546
	i9xx_get_pfit_config(crtc, pipe_config);
5101
 
5547
 
5102
	if (INTEL_INFO(dev)->gen >= 4) {
5548
	if (INTEL_INFO(dev)->gen >= 4) {
5103
		tmp = I915_READ(DPLL_MD(crtc->pipe));
5549
		tmp = I915_READ(DPLL_MD(crtc->pipe));
5104
		pipe_config->pixel_multiplier =
5550
		pipe_config->pixel_multiplier =
5105
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
5551
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
5106
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
5552
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
5107
		pipe_config->dpll_hw_state.dpll_md = tmp;
5553
		pipe_config->dpll_hw_state.dpll_md = tmp;
5108
	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
5554
	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
5109
		tmp = I915_READ(DPLL(crtc->pipe));
5555
		tmp = I915_READ(DPLL(crtc->pipe));
5110
		pipe_config->pixel_multiplier =
5556
		pipe_config->pixel_multiplier =
5111
			((tmp & SDVO_MULTIPLIER_MASK)
5557
			((tmp & SDVO_MULTIPLIER_MASK)
5112
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
5558
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
5113
	} else {
5559
	} else {
5114
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
5560
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
5115
		 * port and will be fixed up in the encoder->get_config
5561
		 * port and will be fixed up in the encoder->get_config
5116
		 * function. */
5562
		 * function. */
5117
		pipe_config->pixel_multiplier = 1;
5563
		pipe_config->pixel_multiplier = 1;
5118
	}
5564
	}
5119
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
5565
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
5120
	if (!IS_VALLEYVIEW(dev)) {
5566
	if (!IS_VALLEYVIEW(dev)) {
5121
		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
5567
		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
5122
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
5568
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
5123
	} else {
5569
	} else {
5124
		/* Mask out read-only status bits. */
5570
		/* Mask out read-only status bits. */
5125
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
5571
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
5126
						     DPLL_PORTC_READY_MASK |
5572
						     DPLL_PORTC_READY_MASK |
5127
						     DPLL_PORTB_READY_MASK);
5573
						     DPLL_PORTB_READY_MASK);
5128
	}
5574
	}
-
 
5575
 
-
 
5576
	if (IS_VALLEYVIEW(dev))
-
 
5577
		vlv_crtc_clock_get(crtc, pipe_config);
-
 
5578
	else
-
 
5579
		i9xx_crtc_clock_get(crtc, pipe_config);
5129
 
5580
 
5130
	return true;
5581
	return true;
5131
}
5582
}
5132
 
5583
 
5133
static void ironlake_init_pch_refclk(struct drm_device *dev)
5584
static void ironlake_init_pch_refclk(struct drm_device *dev)
5134
{
5585
{
5135
	struct drm_i915_private *dev_priv = dev->dev_private;
5586
	struct drm_i915_private *dev_priv = dev->dev_private;
5136
	struct drm_mode_config *mode_config = &dev->mode_config;
5587
	struct drm_mode_config *mode_config = &dev->mode_config;
5137
	struct intel_encoder *encoder;
5588
	struct intel_encoder *encoder;
5138
	u32 val, final;
5589
	u32 val, final;
5139
	bool has_lvds = false;
5590
	bool has_lvds = false;
5140
	bool has_cpu_edp = false;
5591
	bool has_cpu_edp = false;
5141
	bool has_panel = false;
5592
	bool has_panel = false;
5142
	bool has_ck505 = false;
5593
	bool has_ck505 = false;
5143
	bool can_ssc = false;
5594
	bool can_ssc = false;
5144
 
5595
 
5145
	/* We need to take the global config into account */
5596
	/* We need to take the global config into account */
5146
		list_for_each_entry(encoder, &mode_config->encoder_list,
5597
		list_for_each_entry(encoder, &mode_config->encoder_list,
5147
				    base.head) {
5598
				    base.head) {
5148
			switch (encoder->type) {
5599
			switch (encoder->type) {
5149
			case INTEL_OUTPUT_LVDS:
5600
			case INTEL_OUTPUT_LVDS:
5150
			has_panel = true;
5601
			has_panel = true;
5151
				has_lvds = true;
5602
				has_lvds = true;
5152
			break;
5603
			break;
5153
			case INTEL_OUTPUT_EDP:
5604
			case INTEL_OUTPUT_EDP:
5154
			has_panel = true;
5605
			has_panel = true;
5155
			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
5606
			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
5156
				has_cpu_edp = true;
5607
				has_cpu_edp = true;
5157
				break;
5608
				break;
5158
			}
5609
			}
5159
		}
5610
		}
5160
 
5611
 
5161
	if (HAS_PCH_IBX(dev)) {
5612
	if (HAS_PCH_IBX(dev)) {
5162
		has_ck505 = dev_priv->vbt.display_clock_mode;
5613
		has_ck505 = dev_priv->vbt.display_clock_mode;
5163
		can_ssc = has_ck505;
5614
		can_ssc = has_ck505;
5164
	} else {
5615
	} else {
5165
		has_ck505 = false;
5616
		has_ck505 = false;
5166
		can_ssc = true;
5617
		can_ssc = true;
5167
	}
5618
	}
5168
 
5619
 
5169
	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
5620
	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
5170
		      has_panel, has_lvds, has_ck505);
5621
		      has_panel, has_lvds, has_ck505);
5171
 
5622
 
5172
	/* Ironlake: try to setup display ref clock before DPLL
5623
	/* Ironlake: try to setup display ref clock before DPLL
5173
	 * enabling. This is only under driver's control after
5624
	 * enabling. This is only under driver's control after
5174
	 * PCH B stepping, previous chipset stepping should be
5625
	 * PCH B stepping, previous chipset stepping should be
5175
	 * ignoring this setting.
5626
	 * ignoring this setting.
5176
	 */
5627
	 */
5177
	val = I915_READ(PCH_DREF_CONTROL);
5628
	val = I915_READ(PCH_DREF_CONTROL);
5178
 
5629
 
5179
	/* As we must carefully and slowly disable/enable each source in turn,
5630
	/* As we must carefully and slowly disable/enable each source in turn,
5180
	 * compute the final state we want first and check if we need to
5631
	 * compute the final state we want first and check if we need to
5181
	 * make any changes at all.
5632
	 * make any changes at all.
5182
	 */
5633
	 */
5183
	final = val;
5634
	final = val;
5184
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
5635
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
5185
	if (has_ck505)
5636
	if (has_ck505)
5186
		final |= DREF_NONSPREAD_CK505_ENABLE;
5637
		final |= DREF_NONSPREAD_CK505_ENABLE;
5187
	else
5638
	else
5188
		final |= DREF_NONSPREAD_SOURCE_ENABLE;
5639
		final |= DREF_NONSPREAD_SOURCE_ENABLE;
5189
 
5640
 
5190
	final &= ~DREF_SSC_SOURCE_MASK;
5641
	final &= ~DREF_SSC_SOURCE_MASK;
5191
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5642
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5192
	final &= ~DREF_SSC1_ENABLE;
5643
	final &= ~DREF_SSC1_ENABLE;
5193
 
5644
 
5194
	if (has_panel) {
5645
	if (has_panel) {
5195
		final |= DREF_SSC_SOURCE_ENABLE;
5646
		final |= DREF_SSC_SOURCE_ENABLE;
5196
 
5647
 
5197
		if (intel_panel_use_ssc(dev_priv) && can_ssc)
5648
		if (intel_panel_use_ssc(dev_priv) && can_ssc)
5198
			final |= DREF_SSC1_ENABLE;
5649
			final |= DREF_SSC1_ENABLE;
5199
 
5650
 
5200
		if (has_cpu_edp) {
5651
		if (has_cpu_edp) {
5201
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
5652
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
5202
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5653
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5203
			else
5654
			else
5204
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5655
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5205
		} else
5656
		} else
5206
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5657
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5207
	} else {
5658
	} else {
5208
		final |= DREF_SSC_SOURCE_DISABLE;
5659
		final |= DREF_SSC_SOURCE_DISABLE;
5209
		final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5660
		final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5210
	}
5661
	}
5211
 
5662
 
5212
	if (final == val)
5663
	if (final == val)
5213
		return;
5664
		return;
5214
 
5665
 
5215
	/* Always enable nonspread source */
5666
	/* Always enable nonspread source */
5216
	val &= ~DREF_NONSPREAD_SOURCE_MASK;
5667
	val &= ~DREF_NONSPREAD_SOURCE_MASK;
5217
 
5668
 
5218
	if (has_ck505)
5669
	if (has_ck505)
5219
		val |= DREF_NONSPREAD_CK505_ENABLE;
5670
		val |= DREF_NONSPREAD_CK505_ENABLE;
5220
	else
5671
	else
5221
		val |= DREF_NONSPREAD_SOURCE_ENABLE;
5672
		val |= DREF_NONSPREAD_SOURCE_ENABLE;
5222
 
5673
 
5223
	if (has_panel) {
5674
	if (has_panel) {
5224
		val &= ~DREF_SSC_SOURCE_MASK;
5675
		val &= ~DREF_SSC_SOURCE_MASK;
5225
		val |= DREF_SSC_SOURCE_ENABLE;
5676
		val |= DREF_SSC_SOURCE_ENABLE;
5226
 
5677
 
5227
		/* SSC must be turned on before enabling the CPU output  */
5678
		/* SSC must be turned on before enabling the CPU output  */
5228
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5679
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5229
			DRM_DEBUG_KMS("Using SSC on panel\n");
5680
			DRM_DEBUG_KMS("Using SSC on panel\n");
5230
			val |= DREF_SSC1_ENABLE;
5681
			val |= DREF_SSC1_ENABLE;
5231
		} else
5682
		} else
5232
			val &= ~DREF_SSC1_ENABLE;
5683
			val &= ~DREF_SSC1_ENABLE;
5233
 
5684
 
5234
		/* Get SSC going before enabling the outputs */
5685
		/* Get SSC going before enabling the outputs */
5235
		I915_WRITE(PCH_DREF_CONTROL, val);
5686
		I915_WRITE(PCH_DREF_CONTROL, val);
5236
			POSTING_READ(PCH_DREF_CONTROL);
5687
			POSTING_READ(PCH_DREF_CONTROL);
5237
			udelay(200);
5688
			udelay(200);
5238
 
5689
 
5239
		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5690
		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5240
 
5691
 
5241
		/* Enable CPU source on CPU attached eDP */
5692
		/* Enable CPU source on CPU attached eDP */
5242
		if (has_cpu_edp) {
5693
		if (has_cpu_edp) {
5243
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5694
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5244
				DRM_DEBUG_KMS("Using SSC on eDP\n");
5695
				DRM_DEBUG_KMS("Using SSC on eDP\n");
5245
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5696
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5246
			}
5697
			}
5247
			else
5698
			else
5248
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5699
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5249
		} else
5700
		} else
5250
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5701
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5251
 
5702
 
5252
		I915_WRITE(PCH_DREF_CONTROL, val);
5703
		I915_WRITE(PCH_DREF_CONTROL, val);
5253
		POSTING_READ(PCH_DREF_CONTROL);
5704
		POSTING_READ(PCH_DREF_CONTROL);
5254
		udelay(200);
5705
		udelay(200);
5255
		} else {
5706
		} else {
5256
		DRM_DEBUG_KMS("Disabling SSC entirely\n");
5707
		DRM_DEBUG_KMS("Disabling SSC entirely\n");
5257
 
5708
 
5258
		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5709
		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5259
 
5710
 
5260
		/* Turn off CPU output */
5711
		/* Turn off CPU output */
5261
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5712
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5262
 
5713
 
5263
		I915_WRITE(PCH_DREF_CONTROL, val);
5714
		I915_WRITE(PCH_DREF_CONTROL, val);
5264
		POSTING_READ(PCH_DREF_CONTROL);
5715
		POSTING_READ(PCH_DREF_CONTROL);
5265
		udelay(200);
5716
		udelay(200);
5266
 
5717
 
5267
		/* Turn off the SSC source */
5718
		/* Turn off the SSC source */
5268
		val &= ~DREF_SSC_SOURCE_MASK;
5719
		val &= ~DREF_SSC_SOURCE_MASK;
5269
		val |= DREF_SSC_SOURCE_DISABLE;
5720
		val |= DREF_SSC_SOURCE_DISABLE;
5270
 
5721
 
5271
		/* Turn off SSC1 */
5722
		/* Turn off SSC1 */
5272
		val &= ~DREF_SSC1_ENABLE;
5723
		val &= ~DREF_SSC1_ENABLE;
5273
 
5724
 
5274
		I915_WRITE(PCH_DREF_CONTROL, val);
5725
		I915_WRITE(PCH_DREF_CONTROL, val);
5275
		POSTING_READ(PCH_DREF_CONTROL);
5726
		POSTING_READ(PCH_DREF_CONTROL);
5276
		udelay(200);
5727
		udelay(200);
5277
	}
5728
	}
5278
 
5729
 
5279
	BUG_ON(val != final);
5730
	BUG_ON(val != final);
5280
}
5731
}
5281
 
5732
 
5282
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
5733
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
5283
{
5734
{
5284
	uint32_t tmp;
5735
	uint32_t tmp;
5285
 
5736
 
5286
		tmp = I915_READ(SOUTH_CHICKEN2);
5737
		tmp = I915_READ(SOUTH_CHICKEN2);
5287
		tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
5738
		tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
5288
		I915_WRITE(SOUTH_CHICKEN2, tmp);
5739
		I915_WRITE(SOUTH_CHICKEN2, tmp);
5289
 
5740
 
5290
		if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
5741
		if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
5291
				       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
5742
				       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
5292
			DRM_ERROR("FDI mPHY reset assert timeout\n");
5743
			DRM_ERROR("FDI mPHY reset assert timeout\n");
5293
 
5744
 
5294
		tmp = I915_READ(SOUTH_CHICKEN2);
5745
		tmp = I915_READ(SOUTH_CHICKEN2);
5295
		tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
5746
		tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
5296
		I915_WRITE(SOUTH_CHICKEN2, tmp);
5747
		I915_WRITE(SOUTH_CHICKEN2, tmp);
5297
 
5748
 
5298
		if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
5749
		if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
5299
				FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
5750
				FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
5300
			DRM_ERROR("FDI mPHY reset de-assert timeout\n");
5751
			DRM_ERROR("FDI mPHY reset de-assert timeout\n");
5301
}
5752
}
5302
 
5753
 
5303
/* WaMPhyProgramming:hsw */
5754
/* WaMPhyProgramming:hsw */
5304
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
5755
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
5305
{
5756
{
5306
	uint32_t tmp;
5757
	uint32_t tmp;
5307
 
5758
 
5308
	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
5759
	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
5309
	tmp &= ~(0xFF << 24);
5760
	tmp &= ~(0xFF << 24);
5310
	tmp |= (0x12 << 24);
5761
	tmp |= (0x12 << 24);
5311
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
5762
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
5312
 
5763
 
5313
	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
5764
	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
5314
	tmp |= (1 << 11);
5765
	tmp |= (1 << 11);
5315
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
5766
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
5316
 
5767
 
5317
	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
5768
	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
5318
	tmp |= (1 << 11);
5769
	tmp |= (1 << 11);
5319
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
5770
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
5320
 
5771
 
5321
	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
5772
	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
5322
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5773
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5323
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
5774
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
5324
 
5775
 
5325
	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
5776
	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
5326
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5777
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5327
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
5778
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
5328
 
5779
 
5329
		tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
5780
		tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
5330
		tmp &= ~(7 << 13);
5781
		tmp &= ~(7 << 13);
5331
		tmp |= (5 << 13);
5782
		tmp |= (5 << 13);
5332
		intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
5783
		intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
5333
 
5784
 
5334
		tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
5785
		tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
5335
		tmp &= ~(7 << 13);
5786
		tmp &= ~(7 << 13);
5336
		tmp |= (5 << 13);
5787
		tmp |= (5 << 13);
5337
		intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
5788
		intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
5338
 
5789
 
5339
	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
5790
	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
5340
	tmp &= ~0xFF;
5791
	tmp &= ~0xFF;
5341
	tmp |= 0x1C;
5792
	tmp |= 0x1C;
5342
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
5793
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
5343
 
5794
 
5344
	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
5795
	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
5345
	tmp &= ~0xFF;
5796
	tmp &= ~0xFF;
5346
	tmp |= 0x1C;
5797
	tmp |= 0x1C;
5347
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
5798
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
5348
 
5799
 
5349
	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
5800
	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
5350
	tmp &= ~(0xFF << 16);
5801
	tmp &= ~(0xFF << 16);
5351
	tmp |= (0x1C << 16);
5802
	tmp |= (0x1C << 16);
5352
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
5803
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
5353
 
5804
 
5354
	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
5805
	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
5355
	tmp &= ~(0xFF << 16);
5806
	tmp &= ~(0xFF << 16);
5356
	tmp |= (0x1C << 16);
5807
	tmp |= (0x1C << 16);
5357
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
5808
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
5358
 
5809
 
5359
		tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
5810
		tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
5360
		tmp |= (1 << 27);
5811
		tmp |= (1 << 27);
5361
		intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
5812
		intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
5362
 
5813
 
5363
		tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
5814
		tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
5364
		tmp |= (1 << 27);
5815
		tmp |= (1 << 27);
5365
		intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
5816
		intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
5366
 
5817
 
5367
		tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
5818
		tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
5368
		tmp &= ~(0xF << 28);
5819
		tmp &= ~(0xF << 28);
5369
		tmp |= (4 << 28);
5820
		tmp |= (4 << 28);
5370
		intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
5821
		intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
5371
 
5822
 
5372
		tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
5823
		tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
5373
		tmp &= ~(0xF << 28);
5824
		tmp &= ~(0xF << 28);
5374
		tmp |= (4 << 28);
5825
		tmp |= (4 << 28);
5375
		intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
5826
		intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
5376
}
5827
}
5377
 
5828
 
5378
/* Implements 3 different sequences from BSpec chapter "Display iCLK
5829
/* Implements 3 different sequences from BSpec chapter "Display iCLK
5379
 * Programming" based on the parameters passed:
5830
 * Programming" based on the parameters passed:
5380
 * - Sequence to enable CLKOUT_DP
5831
 * - Sequence to enable CLKOUT_DP
5381
 * - Sequence to enable CLKOUT_DP without spread
5832
 * - Sequence to enable CLKOUT_DP without spread
5382
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
5833
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
5383
 */
5834
 */
5384
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
5835
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
5385
				 bool with_fdi)
5836
				 bool with_fdi)
5386
{
5837
{
5387
	struct drm_i915_private *dev_priv = dev->dev_private;
5838
	struct drm_i915_private *dev_priv = dev->dev_private;
5388
	uint32_t reg, tmp;
5839
	uint32_t reg, tmp;
5389
 
5840
 
5390
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
5841
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
5391
		with_spread = true;
5842
		with_spread = true;
5392
	if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
5843
	if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
5393
		 with_fdi, "LP PCH doesn't have FDI\n"))
5844
		 with_fdi, "LP PCH doesn't have FDI\n"))
5394
		with_fdi = false;
5845
		with_fdi = false;
5395
 
5846
 
5396
	mutex_lock(&dev_priv->dpio_lock);
5847
	mutex_lock(&dev_priv->dpio_lock);
5397
 
5848
 
5398
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5849
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5399
	tmp &= ~SBI_SSCCTL_DISABLE;
5850
	tmp &= ~SBI_SSCCTL_DISABLE;
5400
	tmp |= SBI_SSCCTL_PATHALT;
5851
	tmp |= SBI_SSCCTL_PATHALT;
5401
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5852
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5402
 
5853
 
5403
	udelay(24);
5854
	udelay(24);
5404
 
5855
 
5405
	if (with_spread) {
5856
	if (with_spread) {
5406
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5857
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5407
		tmp &= ~SBI_SSCCTL_PATHALT;
5858
		tmp &= ~SBI_SSCCTL_PATHALT;
5408
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5859
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5409
 
5860
 
5410
		if (with_fdi) {
5861
		if (with_fdi) {
5411
			lpt_reset_fdi_mphy(dev_priv);
5862
			lpt_reset_fdi_mphy(dev_priv);
5412
			lpt_program_fdi_mphy(dev_priv);
5863
			lpt_program_fdi_mphy(dev_priv);
5413
		}
5864
		}
5414
	}
5865
	}
5415
 
5866
 
5416
	reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
5867
	reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
5417
	       SBI_GEN0 : SBI_DBUFF0;
5868
	       SBI_GEN0 : SBI_DBUFF0;
5418
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
5869
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
5419
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
5870
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
5420
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
5871
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
5421
 
5872
 
5422
	mutex_unlock(&dev_priv->dpio_lock);
5873
	mutex_unlock(&dev_priv->dpio_lock);
5423
}
5874
}
5424
 
5875
 
5425
/* Sequence to disable CLKOUT_DP */
5876
/* Sequence to disable CLKOUT_DP */
5426
static void lpt_disable_clkout_dp(struct drm_device *dev)
5877
static void lpt_disable_clkout_dp(struct drm_device *dev)
5427
{
5878
{
5428
	struct drm_i915_private *dev_priv = dev->dev_private;
5879
	struct drm_i915_private *dev_priv = dev->dev_private;
5429
	uint32_t reg, tmp;
5880
	uint32_t reg, tmp;
5430
 
5881
 
5431
	mutex_lock(&dev_priv->dpio_lock);
5882
	mutex_lock(&dev_priv->dpio_lock);
5432
 
5883
 
5433
	reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
5884
	reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
5434
	       SBI_GEN0 : SBI_DBUFF0;
5885
	       SBI_GEN0 : SBI_DBUFF0;
5435
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
5886
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
5436
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
5887
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
5437
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
5888
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
5438
 
5889
 
5439
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5890
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5440
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
5891
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
5441
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
5892
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
5442
			tmp |= SBI_SSCCTL_PATHALT;
5893
			tmp |= SBI_SSCCTL_PATHALT;
5443
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5894
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5444
			udelay(32);
5895
			udelay(32);
5445
		}
5896
		}
5446
		tmp |= SBI_SSCCTL_DISABLE;
5897
		tmp |= SBI_SSCCTL_DISABLE;
5447
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5898
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5448
	}
5899
	}
5449
 
5900
 
5450
	mutex_unlock(&dev_priv->dpio_lock);
5901
	mutex_unlock(&dev_priv->dpio_lock);
5451
}
5902
}
5452
 
5903
 
5453
static void lpt_init_pch_refclk(struct drm_device *dev)
5904
static void lpt_init_pch_refclk(struct drm_device *dev)
5454
{
5905
{
5455
	struct drm_mode_config *mode_config = &dev->mode_config;
5906
	struct drm_mode_config *mode_config = &dev->mode_config;
5456
	struct intel_encoder *encoder;
5907
	struct intel_encoder *encoder;
5457
	bool has_vga = false;
5908
	bool has_vga = false;
5458
 
5909
 
5459
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5910
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5460
		switch (encoder->type) {
5911
		switch (encoder->type) {
5461
		case INTEL_OUTPUT_ANALOG:
5912
		case INTEL_OUTPUT_ANALOG:
5462
			has_vga = true;
5913
			has_vga = true;
5463
			break;
5914
			break;
5464
		}
5915
		}
5465
	}
5916
	}
5466
 
5917
 
5467
	if (has_vga)
5918
	if (has_vga)
5468
		lpt_enable_clkout_dp(dev, true, true);
5919
		lpt_enable_clkout_dp(dev, true, true);
5469
	else
5920
	else
5470
		lpt_disable_clkout_dp(dev);
5921
		lpt_disable_clkout_dp(dev);
5471
}
5922
}
5472
 
5923
 
5473
/*
5924
/*
5474
 * Initialize reference clocks when the driver loads
5925
 * Initialize reference clocks when the driver loads
5475
 */
5926
 */
5476
void intel_init_pch_refclk(struct drm_device *dev)
5927
void intel_init_pch_refclk(struct drm_device *dev)
5477
{
5928
{
5478
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
5929
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
5479
		ironlake_init_pch_refclk(dev);
5930
		ironlake_init_pch_refclk(dev);
5480
	else if (HAS_PCH_LPT(dev))
5931
	else if (HAS_PCH_LPT(dev))
5481
		lpt_init_pch_refclk(dev);
5932
		lpt_init_pch_refclk(dev);
5482
}
5933
}
5483
 
5934
 
5484
static int ironlake_get_refclk(struct drm_crtc *crtc)
5935
static int ironlake_get_refclk(struct drm_crtc *crtc)
5485
{
5936
{
5486
	struct drm_device *dev = crtc->dev;
5937
	struct drm_device *dev = crtc->dev;
5487
	struct drm_i915_private *dev_priv = dev->dev_private;
5938
	struct drm_i915_private *dev_priv = dev->dev_private;
5488
	struct intel_encoder *encoder;
5939
	struct intel_encoder *encoder;
5489
	int num_connectors = 0;
5940
	int num_connectors = 0;
5490
	bool is_lvds = false;
5941
	bool is_lvds = false;
5491
 
5942
 
5492
	for_each_encoder_on_crtc(dev, crtc, encoder) {
5943
	for_each_encoder_on_crtc(dev, crtc, encoder) {
5493
		switch (encoder->type) {
5944
		switch (encoder->type) {
5494
		case INTEL_OUTPUT_LVDS:
5945
		case INTEL_OUTPUT_LVDS:
5495
			is_lvds = true;
5946
			is_lvds = true;
5496
			break;
5947
			break;
5497
		}
5948
		}
5498
		num_connectors++;
5949
		num_connectors++;
5499
	}
5950
	}
5500
 
5951
 
5501
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5952
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5502
		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
5953
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
5503
			      dev_priv->vbt.lvds_ssc_freq);
5954
			      dev_priv->vbt.lvds_ssc_freq);
5504
		return dev_priv->vbt.lvds_ssc_freq * 1000;
5955
		return dev_priv->vbt.lvds_ssc_freq;
5505
	}
5956
	}
5506
 
5957
 
5507
	return 120000;
5958
	return 120000;
5508
}
5959
}
5509
 
5960
 
5510
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
5961
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
5511
{
5962
{
5512
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
5963
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
5513
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5964
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5514
	int pipe = intel_crtc->pipe;
5965
	int pipe = intel_crtc->pipe;
5515
	uint32_t val;
5966
	uint32_t val;
5516
 
5967
 
5517
	val = 0;
5968
	val = 0;
5518
 
5969
 
5519
	switch (intel_crtc->config.pipe_bpp) {
5970
	switch (intel_crtc->config.pipe_bpp) {
5520
	case 18:
5971
	case 18:
5521
		val |= PIPECONF_6BPC;
5972
		val |= PIPECONF_6BPC;
5522
		break;
5973
		break;
5523
	case 24:
5974
	case 24:
5524
		val |= PIPECONF_8BPC;
5975
		val |= PIPECONF_8BPC;
5525
		break;
5976
		break;
5526
	case 30:
5977
	case 30:
5527
		val |= PIPECONF_10BPC;
5978
		val |= PIPECONF_10BPC;
5528
		break;
5979
		break;
5529
	case 36:
5980
	case 36:
5530
		val |= PIPECONF_12BPC;
5981
		val |= PIPECONF_12BPC;
5531
		break;
5982
		break;
5532
	default:
5983
	default:
5533
		/* Case prevented by intel_choose_pipe_bpp_dither. */
5984
		/* Case prevented by intel_choose_pipe_bpp_dither. */
5534
		BUG();
5985
		BUG();
5535
	}
5986
	}
5536
 
5987
 
5537
	if (intel_crtc->config.dither)
5988
	if (intel_crtc->config.dither)
5538
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5989
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5539
 
5990
 
5540
	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
5991
	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
5541
		val |= PIPECONF_INTERLACED_ILK;
5992
		val |= PIPECONF_INTERLACED_ILK;
5542
	else
5993
	else
5543
		val |= PIPECONF_PROGRESSIVE;
5994
		val |= PIPECONF_PROGRESSIVE;
5544
 
5995
 
5545
	if (intel_crtc->config.limited_color_range)
5996
	if (intel_crtc->config.limited_color_range)
5546
		val |= PIPECONF_COLOR_RANGE_SELECT;
5997
		val |= PIPECONF_COLOR_RANGE_SELECT;
5547
 
5998
 
5548
	I915_WRITE(PIPECONF(pipe), val);
5999
	I915_WRITE(PIPECONF(pipe), val);
5549
	POSTING_READ(PIPECONF(pipe));
6000
	POSTING_READ(PIPECONF(pipe));
5550
}
6001
}
5551
 
6002
 
5552
/*
6003
/*
5553
 * Set up the pipe CSC unit.
6004
 * Set up the pipe CSC unit.
5554
 *
6005
 *
5555
 * Currently only full range RGB to limited range RGB conversion
6006
 * Currently only full range RGB to limited range RGB conversion
5556
 * is supported, but eventually this should handle various
6007
 * is supported, but eventually this should handle various
5557
 * RGB<->YCbCr scenarios as well.
6008
 * RGB<->YCbCr scenarios as well.
5558
 */
6009
 */
5559
static void intel_set_pipe_csc(struct drm_crtc *crtc)
6010
static void intel_set_pipe_csc(struct drm_crtc *crtc)
5560
{
6011
{
5561
	struct drm_device *dev = crtc->dev;
6012
	struct drm_device *dev = crtc->dev;
5562
	struct drm_i915_private *dev_priv = dev->dev_private;
6013
	struct drm_i915_private *dev_priv = dev->dev_private;
5563
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6014
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5564
	int pipe = intel_crtc->pipe;
6015
	int pipe = intel_crtc->pipe;
5565
	uint16_t coeff = 0x7800; /* 1.0 */
6016
	uint16_t coeff = 0x7800; /* 1.0 */
5566
 
6017
 
5567
	/*
6018
	/*
5568
	 * TODO: Check what kind of values actually come out of the pipe
6019
	 * TODO: Check what kind of values actually come out of the pipe
5569
	 * with these coeff/postoff values and adjust to get the best
6020
	 * with these coeff/postoff values and adjust to get the best
5570
	 * accuracy. Perhaps we even need to take the bpc value into
6021
	 * accuracy. Perhaps we even need to take the bpc value into
5571
	 * consideration.
6022
	 * consideration.
5572
	 */
6023
	 */
5573
 
6024
 
5574
	if (intel_crtc->config.limited_color_range)
6025
	if (intel_crtc->config.limited_color_range)
5575
		coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
6026
		coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
5576
 
6027
 
5577
	/*
6028
	/*
5578
	 * GY/GU and RY/RU should be the other way around according
6029
	 * GY/GU and RY/RU should be the other way around according
5579
	 * to BSpec, but reality doesn't agree. Just set them up in
6030
	 * to BSpec, but reality doesn't agree. Just set them up in
5580
	 * a way that results in the correct picture.
6031
	 * a way that results in the correct picture.
5581
	 */
6032
	 */
5582
	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
6033
	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
5583
	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
6034
	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
5584
 
6035
 
5585
	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
6036
	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
5586
	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
6037
	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
5587
 
6038
 
5588
	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
6039
	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
5589
	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
6040
	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
5590
 
6041
 
5591
	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
6042
	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
5592
	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
6043
	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
5593
	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
6044
	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
5594
 
6045
 
5595
	if (INTEL_INFO(dev)->gen > 6) {
6046
	if (INTEL_INFO(dev)->gen > 6) {
5596
		uint16_t postoff = 0;
6047
		uint16_t postoff = 0;
5597
 
6048
 
5598
		if (intel_crtc->config.limited_color_range)
6049
		if (intel_crtc->config.limited_color_range)
5599
			postoff = (16 * (1 << 12) / 255) & 0x1fff;
6050
			postoff = (16 * (1 << 12) / 255) & 0x1fff;
5600
 
6051
 
5601
		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
6052
		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
5602
		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
6053
		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
5603
		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
6054
		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
5604
 
6055
 
5605
		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
6056
		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
5606
	} else {
6057
	} else {
5607
		uint32_t mode = CSC_MODE_YUV_TO_RGB;
6058
		uint32_t mode = CSC_MODE_YUV_TO_RGB;
5608
 
6059
 
5609
		if (intel_crtc->config.limited_color_range)
6060
		if (intel_crtc->config.limited_color_range)
5610
			mode |= CSC_BLACK_SCREEN_OFFSET;
6061
			mode |= CSC_BLACK_SCREEN_OFFSET;
5611
 
6062
 
5612
		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
6063
		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
5613
	}
6064
	}
5614
}
6065
}
5615
 
6066
 
5616
static void haswell_set_pipeconf(struct drm_crtc *crtc)
6067
static void haswell_set_pipeconf(struct drm_crtc *crtc)
5617
{
6068
{
-
 
6069
	struct drm_device *dev = crtc->dev;
5618
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
6070
	struct drm_i915_private *dev_priv = dev->dev_private;
5619
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6071
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
 
6072
	enum pipe pipe = intel_crtc->pipe;
5620
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
6073
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
5621
	uint32_t val;
6074
	uint32_t val;
5622
 
6075
 
5623
	val = 0;
6076
	val = 0;
5624
 
6077
 
5625
	if (intel_crtc->config.dither)
6078
	if (IS_HASWELL(dev) && intel_crtc->config.dither)
5626
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
6079
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5627
 
6080
 
5628
	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
6081
	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
5629
		val |= PIPECONF_INTERLACED_ILK;
6082
		val |= PIPECONF_INTERLACED_ILK;
5630
	else
6083
	else
5631
		val |= PIPECONF_PROGRESSIVE;
6084
		val |= PIPECONF_PROGRESSIVE;
5632
 
6085
 
5633
	I915_WRITE(PIPECONF(cpu_transcoder), val);
6086
	I915_WRITE(PIPECONF(cpu_transcoder), val);
5634
	POSTING_READ(PIPECONF(cpu_transcoder));
6087
	POSTING_READ(PIPECONF(cpu_transcoder));
5635
 
6088
 
5636
	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
6089
	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
5637
	POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
6090
	POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
-
 
6091
 
-
 
6092
	if (IS_BROADWELL(dev)) {
-
 
6093
		val = 0;
-
 
6094
 
-
 
6095
		switch (intel_crtc->config.pipe_bpp) {
-
 
6096
		case 18:
-
 
6097
			val |= PIPEMISC_DITHER_6_BPC;
-
 
6098
			break;
-
 
6099
		case 24:
-
 
6100
			val |= PIPEMISC_DITHER_8_BPC;
-
 
6101
			break;
-
 
6102
		case 30:
-
 
6103
			val |= PIPEMISC_DITHER_10_BPC;
-
 
6104
			break;
-
 
6105
		case 36:
-
 
6106
			val |= PIPEMISC_DITHER_12_BPC;
-
 
6107
			break;
-
 
6108
		default:
-
 
6109
			/* Case prevented by pipe_config_set_bpp. */
-
 
6110
			BUG();
-
 
6111
		}
-
 
6112
 
-
 
6113
		if (intel_crtc->config.dither)
-
 
6114
			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
-
 
6115
 
-
 
6116
		I915_WRITE(PIPEMISC(pipe), val);
-
 
6117
	}
5638
}
6118
}
5639
 
6119
 
5640
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
6120
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
5641
				    intel_clock_t *clock,
6121
				    intel_clock_t *clock,
5642
				    bool *has_reduced_clock,
6122
				    bool *has_reduced_clock,
5643
				    intel_clock_t *reduced_clock)
6123
				    intel_clock_t *reduced_clock)
5644
{
6124
{
5645
	struct drm_device *dev = crtc->dev;
6125
	struct drm_device *dev = crtc->dev;
5646
	struct drm_i915_private *dev_priv = dev->dev_private;
6126
	struct drm_i915_private *dev_priv = dev->dev_private;
5647
	struct intel_encoder *intel_encoder;
6127
	struct intel_encoder *intel_encoder;
5648
	int refclk;
6128
	int refclk;
5649
	const intel_limit_t *limit;
6129
	const intel_limit_t *limit;
5650
	bool ret, is_lvds = false;
6130
	bool ret, is_lvds = false;
5651
 
6131
 
5652
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
6132
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5653
		switch (intel_encoder->type) {
6133
		switch (intel_encoder->type) {
5654
		case INTEL_OUTPUT_LVDS:
6134
		case INTEL_OUTPUT_LVDS:
5655
			is_lvds = true;
6135
			is_lvds = true;
5656
			break;
6136
			break;
5657
		}
6137
		}
5658
	}
6138
	}
5659
 
6139
 
5660
	refclk = ironlake_get_refclk(crtc);
6140
	refclk = ironlake_get_refclk(crtc);
5661
 
6141
 
5662
	/*
6142
	/*
5663
	 * Returns a set of divisors for the desired target clock with the given
6143
	 * Returns a set of divisors for the desired target clock with the given
5664
	 * refclk, or FALSE.  The returned values represent the clock equation:
6144
	 * refclk, or FALSE.  The returned values represent the clock equation:
5665
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
6145
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
5666
	 */
6146
	 */
5667
	limit = intel_limit(crtc, refclk);
6147
	limit = intel_limit(crtc, refclk);
5668
	ret = dev_priv->display.find_dpll(limit, crtc,
6148
	ret = dev_priv->display.find_dpll(limit, crtc,
5669
					  to_intel_crtc(crtc)->config.port_clock,
6149
					  to_intel_crtc(crtc)->config.port_clock,
5670
					  refclk, NULL, clock);
6150
					  refclk, NULL, clock);
5671
	if (!ret)
6151
	if (!ret)
5672
		return false;
6152
		return false;
5673
 
6153
 
5674
	if (is_lvds && dev_priv->lvds_downclock_avail) {
6154
	if (is_lvds && dev_priv->lvds_downclock_avail) {
5675
		/*
6155
		/*
5676
		 * Ensure we match the reduced clock's P to the target clock.
6156
		 * Ensure we match the reduced clock's P to the target clock.
5677
		 * If the clocks don't match, we can't switch the display clock
6157
		 * If the clocks don't match, we can't switch the display clock
5678
		 * by using the FP0/FP1. In such case we will disable the LVDS
6158
		 * by using the FP0/FP1. In such case we will disable the LVDS
5679
		 * downclock feature.
6159
		 * downclock feature.
5680
		*/
6160
		*/
5681
		*has_reduced_clock =
6161
		*has_reduced_clock =
5682
			dev_priv->display.find_dpll(limit, crtc,
6162
			dev_priv->display.find_dpll(limit, crtc,
5683
						     dev_priv->lvds_downclock,
6163
						     dev_priv->lvds_downclock,
5684
						    refclk, clock,
6164
						    refclk, clock,
5685
						     reduced_clock);
6165
						     reduced_clock);
5686
	}
6166
	}
5687
 
6167
 
5688
	return true;
6168
	return true;
5689
}
6169
}
5690
 
6170
 
5691
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
6171
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
5692
{
6172
{
5693
	/*
6173
	/*
5694
	 * Account for spread spectrum to avoid
6174
	 * Account for spread spectrum to avoid
5695
	 * oversubscribing the link. Max center spread
6175
	 * oversubscribing the link. Max center spread
5696
	 * is 2.5%; use 5% for safety's sake.
6176
	 * is 2.5%; use 5% for safety's sake.
5697
	 */
6177
	 */
5698
	u32 bps = target_clock * bpp * 21 / 20;
6178
	u32 bps = target_clock * bpp * 21 / 20;
5699
	return bps / (link_bw * 8) + 1;
6179
	return bps / (link_bw * 8) + 1;
5700
}
6180
}
5701
 
6181
 
5702
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
6182
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
5703
{
6183
{
5704
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
6184
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
5705
}
6185
}
5706
 
6186
 
5707
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
6187
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
5708
				      u32 *fp,
6188
				      u32 *fp,
5709
				      intel_clock_t *reduced_clock, u32 *fp2)
6189
				      intel_clock_t *reduced_clock, u32 *fp2)
5710
{
6190
{
5711
	struct drm_crtc *crtc = &intel_crtc->base;
6191
	struct drm_crtc *crtc = &intel_crtc->base;
5712
	struct drm_device *dev = crtc->dev;
6192
	struct drm_device *dev = crtc->dev;
5713
	struct drm_i915_private *dev_priv = dev->dev_private;
6193
	struct drm_i915_private *dev_priv = dev->dev_private;
5714
	struct intel_encoder *intel_encoder;
6194
	struct intel_encoder *intel_encoder;
5715
	uint32_t dpll;
6195
	uint32_t dpll;
5716
	int factor, num_connectors = 0;
6196
	int factor, num_connectors = 0;
5717
	bool is_lvds = false, is_sdvo = false;
6197
	bool is_lvds = false, is_sdvo = false;
5718
 
6198
 
5719
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
6199
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5720
		switch (intel_encoder->type) {
6200
		switch (intel_encoder->type) {
5721
		case INTEL_OUTPUT_LVDS:
6201
		case INTEL_OUTPUT_LVDS:
5722
			is_lvds = true;
6202
			is_lvds = true;
5723
			break;
6203
			break;
5724
		case INTEL_OUTPUT_SDVO:
6204
		case INTEL_OUTPUT_SDVO:
5725
		case INTEL_OUTPUT_HDMI:
6205
		case INTEL_OUTPUT_HDMI:
5726
			is_sdvo = true;
6206
			is_sdvo = true;
5727
			break;
6207
			break;
5728
		}
6208
		}
5729
 
6209
 
5730
		num_connectors++;
6210
		num_connectors++;
5731
	}
6211
	}
5732
 
6212
 
5733
    /* Enable autotuning of the PLL clock (if permissible) */
6213
    /* Enable autotuning of the PLL clock (if permissible) */
5734
    factor = 21;
6214
    factor = 21;
5735
    if (is_lvds) {
6215
    if (is_lvds) {
5736
        if ((intel_panel_use_ssc(dev_priv) &&
6216
        if ((intel_panel_use_ssc(dev_priv) &&
5737
		     dev_priv->vbt.lvds_ssc_freq == 100) ||
6217
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
5738
		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
6218
		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
5739
            factor = 25;
6219
            factor = 25;
5740
	} else if (intel_crtc->config.sdvo_tv_clock)
6220
	} else if (intel_crtc->config.sdvo_tv_clock)
5741
        factor = 20;
6221
        factor = 20;
5742
 
6222
 
5743
	if (ironlake_needs_fb_cb_tune(&intel_crtc->config.dpll, factor))
6223
	if (ironlake_needs_fb_cb_tune(&intel_crtc->config.dpll, factor))
5744
		*fp |= FP_CB_TUNE;
6224
		*fp |= FP_CB_TUNE;
5745
 
6225
 
5746
	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
6226
	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
5747
		*fp2 |= FP_CB_TUNE;
6227
		*fp2 |= FP_CB_TUNE;
5748
 
6228
 
5749
    dpll = 0;
6229
    dpll = 0;
5750
 
6230
 
5751
    if (is_lvds)
6231
    if (is_lvds)
5752
        dpll |= DPLLB_MODE_LVDS;
6232
        dpll |= DPLLB_MODE_LVDS;
5753
    else
6233
    else
5754
        dpll |= DPLLB_MODE_DAC_SERIAL;
6234
        dpll |= DPLLB_MODE_DAC_SERIAL;
5755
 
6235
 
5756
			dpll |= (intel_crtc->config.pixel_multiplier - 1)
6236
			dpll |= (intel_crtc->config.pixel_multiplier - 1)
5757
				<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
6237
				<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
5758
 
6238
 
5759
	if (is_sdvo)
6239
	if (is_sdvo)
5760
		dpll |= DPLL_SDVO_HIGH_SPEED;
6240
		dpll |= DPLL_SDVO_HIGH_SPEED;
5761
	if (intel_crtc->config.has_dp_encoder)
6241
	if (intel_crtc->config.has_dp_encoder)
5762
		dpll |= DPLL_SDVO_HIGH_SPEED;
6242
		dpll |= DPLL_SDVO_HIGH_SPEED;
5763
 
6243
 
5764
    /* compute bitmask from p1 value */
6244
    /* compute bitmask from p1 value */
5765
	dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
6245
	dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5766
    /* also FPA1 */
6246
    /* also FPA1 */
5767
	dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
6247
	dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5768
 
6248
 
5769
	switch (intel_crtc->config.dpll.p2) {
6249
	switch (intel_crtc->config.dpll.p2) {
5770
    case 5:
6250
    case 5:
5771
        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
6251
        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5772
        break;
6252
        break;
5773
    case 7:
6253
    case 7:
5774
        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
6254
        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5775
        break;
6255
        break;
5776
    case 10:
6256
    case 10:
5777
        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
6257
        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5778
        break;
6258
        break;
5779
    case 14:
6259
    case 14:
5780
        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
6260
        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5781
        break;
6261
        break;
5782
    }
6262
    }
5783
 
6263
 
5784
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
6264
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5785
        dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
6265
        dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5786
    else
6266
    else
5787
        dpll |= PLL_REF_INPUT_DREFCLK;
6267
        dpll |= PLL_REF_INPUT_DREFCLK;
5788
 
6268
 
5789
	return dpll | DPLL_VCO_ENABLE;
6269
	return dpll | DPLL_VCO_ENABLE;
5790
}
6270
}
5791
 
6271
 
5792
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
6272
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5793
				  int x, int y,
6273
				  int x, int y,
5794
				  struct drm_framebuffer *fb)
6274
				  struct drm_framebuffer *fb)
5795
{
6275
{
5796
	struct drm_device *dev = crtc->dev;
6276
	struct drm_device *dev = crtc->dev;
5797
	struct drm_i915_private *dev_priv = dev->dev_private;
6277
	struct drm_i915_private *dev_priv = dev->dev_private;
5798
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6278
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5799
	int pipe = intel_crtc->pipe;
6279
	int pipe = intel_crtc->pipe;
5800
	int plane = intel_crtc->plane;
6280
	int plane = intel_crtc->plane;
5801
	int num_connectors = 0;
6281
	int num_connectors = 0;
5802
	intel_clock_t clock, reduced_clock;
6282
	intel_clock_t clock, reduced_clock;
5803
	u32 dpll = 0, fp = 0, fp2 = 0;
6283
	u32 dpll = 0, fp = 0, fp2 = 0;
5804
	bool ok, has_reduced_clock = false;
6284
	bool ok, has_reduced_clock = false;
5805
	bool is_lvds = false;
6285
	bool is_lvds = false;
5806
	struct intel_encoder *encoder;
6286
	struct intel_encoder *encoder;
5807
	struct intel_shared_dpll *pll;
6287
	struct intel_shared_dpll *pll;
5808
	int ret;
6288
	int ret;
5809
 
6289
 
5810
	for_each_encoder_on_crtc(dev, crtc, encoder) {
6290
	for_each_encoder_on_crtc(dev, crtc, encoder) {
5811
		switch (encoder->type) {
6291
		switch (encoder->type) {
5812
		case INTEL_OUTPUT_LVDS:
6292
		case INTEL_OUTPUT_LVDS:
5813
			is_lvds = true;
6293
			is_lvds = true;
5814
			break;
6294
			break;
5815
		}
6295
		}
5816
 
6296
 
5817
		num_connectors++;
6297
		num_connectors++;
5818
	}
6298
	}
5819
 
6299
 
5820
	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
6300
	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
5821
	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
6301
	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
5822
 
6302
 
5823
	ok = ironlake_compute_clocks(crtc, &clock,
6303
	ok = ironlake_compute_clocks(crtc, &clock,
5824
				     &has_reduced_clock, &reduced_clock);
6304
				     &has_reduced_clock, &reduced_clock);
5825
	if (!ok && !intel_crtc->config.clock_set) {
6305
	if (!ok && !intel_crtc->config.clock_set) {
5826
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
6306
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
5827
		return -EINVAL;
6307
		return -EINVAL;
5828
	}
6308
	}
5829
	/* Compat-code for transition, will disappear. */
6309
	/* Compat-code for transition, will disappear. */
5830
	if (!intel_crtc->config.clock_set) {
6310
	if (!intel_crtc->config.clock_set) {
5831
		intel_crtc->config.dpll.n = clock.n;
6311
		intel_crtc->config.dpll.n = clock.n;
5832
		intel_crtc->config.dpll.m1 = clock.m1;
6312
		intel_crtc->config.dpll.m1 = clock.m1;
5833
		intel_crtc->config.dpll.m2 = clock.m2;
6313
		intel_crtc->config.dpll.m2 = clock.m2;
5834
		intel_crtc->config.dpll.p1 = clock.p1;
6314
		intel_crtc->config.dpll.p1 = clock.p1;
5835
		intel_crtc->config.dpll.p2 = clock.p2;
6315
		intel_crtc->config.dpll.p2 = clock.p2;
5836
	}
6316
	}
5837
 
6317
 
5838
	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
6318
	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
5839
	if (intel_crtc->config.has_pch_encoder) {
6319
	if (intel_crtc->config.has_pch_encoder) {
5840
		fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
6320
		fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
5841
	if (has_reduced_clock)
6321
	if (has_reduced_clock)
5842
			fp2 = i9xx_dpll_compute_fp(&reduced_clock);
6322
			fp2 = i9xx_dpll_compute_fp(&reduced_clock);
5843
 
6323
 
5844
		dpll = ironlake_compute_dpll(intel_crtc,
6324
		dpll = ironlake_compute_dpll(intel_crtc,
5845
					     &fp, &reduced_clock,
6325
					     &fp, &reduced_clock,
5846
				     has_reduced_clock ? &fp2 : NULL);
6326
				     has_reduced_clock ? &fp2 : NULL);
5847
 
6327
 
5848
		intel_crtc->config.dpll_hw_state.dpll = dpll;
6328
		intel_crtc->config.dpll_hw_state.dpll = dpll;
5849
		intel_crtc->config.dpll_hw_state.fp0 = fp;
6329
		intel_crtc->config.dpll_hw_state.fp0 = fp;
5850
		if (has_reduced_clock)
6330
		if (has_reduced_clock)
5851
			intel_crtc->config.dpll_hw_state.fp1 = fp2;
6331
			intel_crtc->config.dpll_hw_state.fp1 = fp2;
5852
		else
6332
		else
5853
			intel_crtc->config.dpll_hw_state.fp1 = fp;
6333
			intel_crtc->config.dpll_hw_state.fp1 = fp;
5854
 
6334
 
5855
		pll = intel_get_shared_dpll(intel_crtc);
6335
		pll = intel_get_shared_dpll(intel_crtc);
5856
		if (pll == NULL) {
6336
		if (pll == NULL) {
5857
			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
6337
			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
5858
					 pipe_name(pipe));
6338
					 pipe_name(pipe));
5859
			return -EINVAL;
6339
			return -EINVAL;
5860
        }
6340
        }
5861
	} else
6341
	} else
5862
		intel_put_shared_dpll(intel_crtc);
6342
		intel_put_shared_dpll(intel_crtc);
5863
 
6343
 
5864
	if (intel_crtc->config.has_dp_encoder)
6344
	if (intel_crtc->config.has_dp_encoder)
5865
		intel_dp_set_m_n(intel_crtc);
6345
		intel_dp_set_m_n(intel_crtc);
5866
 
6346
 
5867
	if (is_lvds && has_reduced_clock && i915_powersave)
6347
	if (is_lvds && has_reduced_clock && i915_powersave)
5868
		intel_crtc->lowfreq_avail = true;
6348
		intel_crtc->lowfreq_avail = true;
5869
	else
6349
	else
5870
		intel_crtc->lowfreq_avail = false;
6350
		intel_crtc->lowfreq_avail = false;
5871
 
-
 
5872
	if (intel_crtc->config.has_pch_encoder) {
-
 
5873
		pll = intel_crtc_to_shared_dpll(intel_crtc);
-
 
5874
 
-
 
5875
	}
-
 
5876
 
6351
 
5877
	intel_set_pipe_timings(intel_crtc);
6352
	intel_set_pipe_timings(intel_crtc);
5878
 
6353
 
5879
	if (intel_crtc->config.has_pch_encoder) {
6354
	if (intel_crtc->config.has_pch_encoder) {
5880
		intel_cpu_transcoder_set_m_n(intel_crtc,
6355
		intel_cpu_transcoder_set_m_n(intel_crtc,
5881
					     &intel_crtc->config.fdi_m_n);
6356
					     &intel_crtc->config.fdi_m_n);
5882
	}
6357
	}
5883
 
6358
 
5884
	ironlake_set_pipeconf(crtc);
6359
	ironlake_set_pipeconf(crtc);
5885
 
6360
 
5886
	/* Set up the display plane register */
6361
	/* Set up the display plane register */
5887
	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
6362
	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
5888
	POSTING_READ(DSPCNTR(plane));
6363
	POSTING_READ(DSPCNTR(plane));
5889
 
6364
 
5890
	ret = intel_pipe_set_base(crtc, x, y, fb);
6365
	ret = intel_pipe_set_base(crtc, x, y, fb);
5891
 
-
 
5892
	intel_update_watermarks(dev);
-
 
5893
 
6366
 
5894
	return ret;
6367
	return ret;
5895
}
6368
}
5896
 
6369
 
5897
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
6370
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
5898
					struct intel_crtc_config *pipe_config)
6371
					 struct intel_link_m_n *m_n)
5899
{
6372
{
5900
	struct drm_device *dev = crtc->base.dev;
6373
	struct drm_device *dev = crtc->base.dev;
5901
	struct drm_i915_private *dev_priv = dev->dev_private;
6374
	struct drm_i915_private *dev_priv = dev->dev_private;
5902
	enum transcoder transcoder = pipe_config->cpu_transcoder;
6375
	enum pipe pipe = crtc->pipe;
5903
 
6376
 
5904
	pipe_config->fdi_m_n.link_m = I915_READ(PIPE_LINK_M1(transcoder));
6377
	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
5905
	pipe_config->fdi_m_n.link_n = I915_READ(PIPE_LINK_N1(transcoder));
6378
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
5906
	pipe_config->fdi_m_n.gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
6379
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
5907
					& ~TU_SIZE_MASK;
6380
		& ~TU_SIZE_MASK;
5908
	pipe_config->fdi_m_n.gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
6381
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
5909
	pipe_config->fdi_m_n.tu = ((I915_READ(PIPE_DATA_M1(transcoder))
6382
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
5910
				   & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
6383
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5911
}
6384
}
-
 
6385
 
-
 
6386
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
-
 
6387
					 enum transcoder transcoder,
-
 
6388
					 struct intel_link_m_n *m_n)
-
 
6389
{
-
 
6390
	struct drm_device *dev = crtc->base.dev;
-
 
6391
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
6392
	enum pipe pipe = crtc->pipe;
-
 
6393
 
-
 
6394
	if (INTEL_INFO(dev)->gen >= 5) {
-
 
6395
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
-
 
6396
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
-
 
6397
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
-
 
6398
					& ~TU_SIZE_MASK;
-
 
6399
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
-
 
6400
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
-
 
6401
				   & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
-
 
6402
	} else {
-
 
6403
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
-
 
6404
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
-
 
6405
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
-
 
6406
			& ~TU_SIZE_MASK;
-
 
6407
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
-
 
6408
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
-
 
6409
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
-
 
6410
	}
-
 
6411
}
-
 
6412
 
-
 
6413
void intel_dp_get_m_n(struct intel_crtc *crtc,
-
 
6414
		      struct intel_crtc_config *pipe_config)
-
 
6415
{
-
 
6416
	if (crtc->config.has_pch_encoder)
-
 
6417
		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
-
 
6418
	else
-
 
6419
		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
-
 
6420
					     &pipe_config->dp_m_n);
-
 
6421
}
-
 
6422
 
-
 
6423
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
-
 
6424
					struct intel_crtc_config *pipe_config)
-
 
6425
{
-
 
6426
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
-
 
6427
				     &pipe_config->fdi_m_n);
-
 
6428
}
5912
 
6429
 
5913
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
6430
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
5914
				     struct intel_crtc_config *pipe_config)
6431
				     struct intel_crtc_config *pipe_config)
5915
{
6432
{
5916
	struct drm_device *dev = crtc->base.dev;
6433
	struct drm_device *dev = crtc->base.dev;
5917
	struct drm_i915_private *dev_priv = dev->dev_private;
6434
	struct drm_i915_private *dev_priv = dev->dev_private;
5918
	uint32_t tmp;
6435
	uint32_t tmp;
5919
 
6436
 
5920
	tmp = I915_READ(PF_CTL(crtc->pipe));
6437
	tmp = I915_READ(PF_CTL(crtc->pipe));
5921
 
6438
 
5922
	if (tmp & PF_ENABLE) {
6439
	if (tmp & PF_ENABLE) {
5923
		pipe_config->pch_pfit.enabled = true;
6440
		pipe_config->pch_pfit.enabled = true;
5924
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
6441
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
5925
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
6442
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
5926
 
6443
 
5927
		/* We currently do not free assignements of panel fitters on
6444
		/* We currently do not free assignements of panel fitters on
5928
		 * ivb/hsw (since we don't use the higher upscaling modes which
6445
		 * ivb/hsw (since we don't use the higher upscaling modes which
5929
		 * differentiates them) so just WARN about this case for now. */
6446
		 * differentiates them) so just WARN about this case for now. */
5930
		if (IS_GEN7(dev)) {
6447
		if (IS_GEN7(dev)) {
5931
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
6448
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
5932
				PF_PIPE_SEL_IVB(crtc->pipe));
6449
				PF_PIPE_SEL_IVB(crtc->pipe));
5933
		}
6450
		}
5934
	}
6451
	}
5935
}
6452
}
5936
 
6453
 
5937
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
6454
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
5938
				     struct intel_crtc_config *pipe_config)
6455
				     struct intel_crtc_config *pipe_config)
5939
{
6456
{
5940
	struct drm_device *dev = crtc->base.dev;
6457
	struct drm_device *dev = crtc->base.dev;
5941
	struct drm_i915_private *dev_priv = dev->dev_private;
6458
	struct drm_i915_private *dev_priv = dev->dev_private;
5942
	uint32_t tmp;
6459
	uint32_t tmp;
5943
 
6460
 
5944
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
6461
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
5945
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
6462
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
5946
 
6463
 
5947
	tmp = I915_READ(PIPECONF(crtc->pipe));
6464
	tmp = I915_READ(PIPECONF(crtc->pipe));
5948
	if (!(tmp & PIPECONF_ENABLE))
6465
	if (!(tmp & PIPECONF_ENABLE))
5949
		return false;
6466
		return false;
5950
 
6467
 
5951
	switch (tmp & PIPECONF_BPC_MASK) {
6468
	switch (tmp & PIPECONF_BPC_MASK) {
5952
	case PIPECONF_6BPC:
6469
	case PIPECONF_6BPC:
5953
		pipe_config->pipe_bpp = 18;
6470
		pipe_config->pipe_bpp = 18;
5954
		break;
6471
		break;
5955
	case PIPECONF_8BPC:
6472
	case PIPECONF_8BPC:
5956
		pipe_config->pipe_bpp = 24;
6473
		pipe_config->pipe_bpp = 24;
5957
		break;
6474
		break;
5958
	case PIPECONF_10BPC:
6475
	case PIPECONF_10BPC:
5959
		pipe_config->pipe_bpp = 30;
6476
		pipe_config->pipe_bpp = 30;
5960
		break;
6477
		break;
5961
	case PIPECONF_12BPC:
6478
	case PIPECONF_12BPC:
5962
		pipe_config->pipe_bpp = 36;
6479
		pipe_config->pipe_bpp = 36;
5963
		break;
6480
		break;
5964
	default:
6481
	default:
5965
		break;
6482
		break;
5966
	}
6483
	}
5967
 
6484
 
5968
	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
6485
	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
5969
		struct intel_shared_dpll *pll;
6486
		struct intel_shared_dpll *pll;
5970
 
6487
 
5971
		pipe_config->has_pch_encoder = true;
6488
		pipe_config->has_pch_encoder = true;
5972
 
6489
 
5973
		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
6490
		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
5974
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
6491
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
5975
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
6492
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
5976
 
6493
 
5977
		ironlake_get_fdi_m_n_config(crtc, pipe_config);
6494
		ironlake_get_fdi_m_n_config(crtc, pipe_config);
5978
 
6495
 
5979
		if (HAS_PCH_IBX(dev_priv->dev)) {
6496
		if (HAS_PCH_IBX(dev_priv->dev)) {
5980
			pipe_config->shared_dpll =
6497
			pipe_config->shared_dpll =
5981
				(enum intel_dpll_id) crtc->pipe;
6498
				(enum intel_dpll_id) crtc->pipe;
5982
		} else {
6499
		} else {
5983
			tmp = I915_READ(PCH_DPLL_SEL);
6500
			tmp = I915_READ(PCH_DPLL_SEL);
5984
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
6501
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
5985
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
6502
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
5986
			else
6503
			else
5987
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
6504
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
5988
		}
6505
		}
5989
 
6506
 
5990
		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
6507
		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
5991
 
6508
 
5992
		WARN_ON(!pll->get_hw_state(dev_priv, pll,
6509
		WARN_ON(!pll->get_hw_state(dev_priv, pll,
5993
					   &pipe_config->dpll_hw_state));
6510
					   &pipe_config->dpll_hw_state));
5994
 
6511
 
5995
		tmp = pipe_config->dpll_hw_state.dpll;
6512
		tmp = pipe_config->dpll_hw_state.dpll;
5996
		pipe_config->pixel_multiplier =
6513
		pipe_config->pixel_multiplier =
5997
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
6514
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
5998
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
6515
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
-
 
6516
 
-
 
6517
		ironlake_pch_clock_get(crtc, pipe_config);
5999
	} else {
6518
	} else {
6000
		pipe_config->pixel_multiplier = 1;
6519
		pipe_config->pixel_multiplier = 1;
6001
	}
6520
	}
6002
 
6521
 
6003
	intel_get_pipe_timings(crtc, pipe_config);
6522
	intel_get_pipe_timings(crtc, pipe_config);
6004
 
6523
 
6005
	ironlake_get_pfit_config(crtc, pipe_config);
6524
	ironlake_get_pfit_config(crtc, pipe_config);
6006
 
6525
 
6007
	return true;
6526
	return true;
6008
}
6527
}
6009
 
6528
 
6010
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
6529
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
6011
{
6530
{
6012
	struct drm_device *dev = dev_priv->dev;
6531
	struct drm_device *dev = dev_priv->dev;
6013
	struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
6532
	struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
6014
	struct intel_crtc *crtc;
6533
	struct intel_crtc *crtc;
6015
	unsigned long irqflags;
6534
	unsigned long irqflags;
6016
	uint32_t val;
6535
	uint32_t val;
6017
 
6536
 
6018
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
6537
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
6019
		WARN(crtc->active, "CRTC for pipe %c enabled\n",
6538
		WARN(crtc->active, "CRTC for pipe %c enabled\n",
6020
		     pipe_name(crtc->pipe));
6539
		     pipe_name(crtc->pipe));
6021
 
6540
 
6022
	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
6541
	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
6023
	WARN(plls->spll_refcount, "SPLL enabled\n");
6542
	WARN(plls->spll_refcount, "SPLL enabled\n");
6024
	WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n");
6543
	WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n");
6025
	WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n");
6544
	WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n");
6026
	WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
6545
	WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
6027
	WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
6546
	WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
6028
	     "CPU PWM1 enabled\n");
6547
	     "CPU PWM1 enabled\n");
6029
	WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
6548
	WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
6030
	     "CPU PWM2 enabled\n");
6549
	     "CPU PWM2 enabled\n");
6031
	WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
6550
	WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
6032
	     "PCH PWM1 enabled\n");
6551
	     "PCH PWM1 enabled\n");
6033
	WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
6552
	WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
6034
	     "Utility pin enabled\n");
6553
	     "Utility pin enabled\n");
6035
	WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
6554
	WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
6036
 
6555
 
6037
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
6556
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
6038
	val = I915_READ(DEIMR);
6557
	val = I915_READ(DEIMR);
6039
	WARN((val & ~DE_PCH_EVENT_IVB) != val,
6558
	WARN((val | DE_PCH_EVENT_IVB) != 0xffffffff,
6040
	     "Unexpected DEIMR bits enabled: 0x%x\n", val);
6559
	     "Unexpected DEIMR bits enabled: 0x%x\n", val);
6041
	val = I915_READ(SDEIMR);
6560
	val = I915_READ(SDEIMR);
6042
	WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff,
6561
	WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff,
6043
	     "Unexpected SDEIMR bits enabled: 0x%x\n", val);
6562
	     "Unexpected SDEIMR bits enabled: 0x%x\n", val);
6044
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
6563
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
6045
}
6564
}
6046
 
6565
 
6047
/*
6566
/*
6048
 * This function implements pieces of two sequences from BSpec:
6567
 * This function implements pieces of two sequences from BSpec:
6049
 * - Sequence for display software to disable LCPLL
6568
 * - Sequence for display software to disable LCPLL
6050
 * - Sequence for display software to allow package C8+
6569
 * - Sequence for display software to allow package C8+
6051
 * The steps implemented here are just the steps that actually touch the LCPLL
6570
 * The steps implemented here are just the steps that actually touch the LCPLL
6052
 * register. Callers should take care of disabling all the display engine
6571
 * register. Callers should take care of disabling all the display engine
6053
 * functions, doing the mode unset, fixing interrupts, etc.
6572
 * functions, doing the mode unset, fixing interrupts, etc.
6054
 */
6573
 */
6055
void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
6574
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
6056
		       bool switch_to_fclk, bool allow_power_down)
6575
		       bool switch_to_fclk, bool allow_power_down)
6057
{
6576
{
6058
	uint32_t val;
6577
	uint32_t val;
6059
 
6578
 
6060
	assert_can_disable_lcpll(dev_priv);
6579
	assert_can_disable_lcpll(dev_priv);
6061
 
6580
 
6062
	val = I915_READ(LCPLL_CTL);
6581
	val = I915_READ(LCPLL_CTL);
6063
 
6582
 
6064
	if (switch_to_fclk) {
6583
	if (switch_to_fclk) {
6065
		val |= LCPLL_CD_SOURCE_FCLK;
6584
		val |= LCPLL_CD_SOURCE_FCLK;
6066
		I915_WRITE(LCPLL_CTL, val);
6585
		I915_WRITE(LCPLL_CTL, val);
6067
 
6586
 
6068
		if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
6587
		if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
6069
				       LCPLL_CD_SOURCE_FCLK_DONE, 1))
6588
				       LCPLL_CD_SOURCE_FCLK_DONE, 1))
6070
			DRM_ERROR("Switching to FCLK failed\n");
6589
			DRM_ERROR("Switching to FCLK failed\n");
6071
 
6590
 
6072
		val = I915_READ(LCPLL_CTL);
6591
		val = I915_READ(LCPLL_CTL);
6073
	}
6592
	}
6074
 
6593
 
6075
	val |= LCPLL_PLL_DISABLE;
6594
	val |= LCPLL_PLL_DISABLE;
6076
	I915_WRITE(LCPLL_CTL, val);
6595
	I915_WRITE(LCPLL_CTL, val);
6077
	POSTING_READ(LCPLL_CTL);
6596
	POSTING_READ(LCPLL_CTL);
6078
 
6597
 
6079
	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
6598
	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
6080
		DRM_ERROR("LCPLL still locked\n");
6599
		DRM_ERROR("LCPLL still locked\n");
6081
 
6600
 
6082
	val = I915_READ(D_COMP);
6601
	val = I915_READ(D_COMP);
6083
	val |= D_COMP_COMP_DISABLE;
6602
	val |= D_COMP_COMP_DISABLE;
-
 
6603
	mutex_lock(&dev_priv->rps.hw_lock);
-
 
6604
	if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
6084
	I915_WRITE(D_COMP, val);
6605
		DRM_ERROR("Failed to disable D_COMP\n");
-
 
6606
	mutex_unlock(&dev_priv->rps.hw_lock);
6085
	POSTING_READ(D_COMP);
6607
	POSTING_READ(D_COMP);
6086
    udelay(100);
6608
    delay(1);
6087
 
6609
 
6088
	if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
6610
	if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
6089
		DRM_ERROR("D_COMP RCOMP still in progress\n");
6611
		DRM_ERROR("D_COMP RCOMP still in progress\n");
6090
 
6612
 
6091
	if (allow_power_down) {
6613
	if (allow_power_down) {
6092
		val = I915_READ(LCPLL_CTL);
6614
		val = I915_READ(LCPLL_CTL);
6093
		val |= LCPLL_POWER_DOWN_ALLOW;
6615
		val |= LCPLL_POWER_DOWN_ALLOW;
6094
		I915_WRITE(LCPLL_CTL, val);
6616
		I915_WRITE(LCPLL_CTL, val);
6095
		POSTING_READ(LCPLL_CTL);
6617
		POSTING_READ(LCPLL_CTL);
6096
	}
6618
	}
6097
}
6619
}
6098
 
6620
 
6099
/*
6621
/*
6100
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
6622
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
6101
 * source.
6623
 * source.
6102
 */
6624
 */
6103
void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
6625
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
6104
{
6626
{
6105
	uint32_t val;
6627
	uint32_t val;
6106
 
6628
 
6107
	val = I915_READ(LCPLL_CTL);
6629
	val = I915_READ(LCPLL_CTL);
6108
 
6630
 
6109
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
6631
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
6110
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
6632
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
6111
		return;
6633
		return;
6112
 
6634
 
6113
	/* Make sure we're not on PC8 state before disabling PC8, otherwise
6635
	/* Make sure we're not on PC8 state before disabling PC8, otherwise
6114
	 * we'll hang the machine! */
6636
	 * we'll hang the machine! */
6115
	gen6_gt_force_wake_get(dev_priv);
6637
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
6116
 
6638
 
6117
	if (val & LCPLL_POWER_DOWN_ALLOW) {
6639
	if (val & LCPLL_POWER_DOWN_ALLOW) {
6118
		val &= ~LCPLL_POWER_DOWN_ALLOW;
6640
		val &= ~LCPLL_POWER_DOWN_ALLOW;
6119
		I915_WRITE(LCPLL_CTL, val);
6641
		I915_WRITE(LCPLL_CTL, val);
6120
		POSTING_READ(LCPLL_CTL);
6642
		POSTING_READ(LCPLL_CTL);
6121
	}
6643
	}
6122
 
6644
 
6123
	val = I915_READ(D_COMP);
6645
	val = I915_READ(D_COMP);
6124
	val |= D_COMP_COMP_FORCE;
6646
	val |= D_COMP_COMP_FORCE;
6125
	val &= ~D_COMP_COMP_DISABLE;
6647
	val &= ~D_COMP_COMP_DISABLE;
-
 
6648
	mutex_lock(&dev_priv->rps.hw_lock);
-
 
6649
	if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
6126
	I915_WRITE(D_COMP, val);
6650
		DRM_ERROR("Failed to enable D_COMP\n");
-
 
6651
	mutex_unlock(&dev_priv->rps.hw_lock);
6127
	POSTING_READ(D_COMP);
6652
	POSTING_READ(D_COMP);
6128
 
6653
 
6129
	val = I915_READ(LCPLL_CTL);
6654
	val = I915_READ(LCPLL_CTL);
6130
	val &= ~LCPLL_PLL_DISABLE;
6655
	val &= ~LCPLL_PLL_DISABLE;
6131
	I915_WRITE(LCPLL_CTL, val);
6656
	I915_WRITE(LCPLL_CTL, val);
6132
 
6657
 
6133
	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
6658
	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
6134
		DRM_ERROR("LCPLL not locked yet\n");
6659
		DRM_ERROR("LCPLL not locked yet\n");
6135
 
6660
 
6136
	if (val & LCPLL_CD_SOURCE_FCLK) {
6661
	if (val & LCPLL_CD_SOURCE_FCLK) {
6137
		val = I915_READ(LCPLL_CTL);
6662
		val = I915_READ(LCPLL_CTL);
6138
		val &= ~LCPLL_CD_SOURCE_FCLK;
6663
		val &= ~LCPLL_CD_SOURCE_FCLK;
6139
		I915_WRITE(LCPLL_CTL, val);
6664
		I915_WRITE(LCPLL_CTL, val);
6140
 
6665
 
6141
		if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
6666
		if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
6142
					LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
6667
					LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
6143
			DRM_ERROR("Switching back to LCPLL failed\n");
6668
			DRM_ERROR("Switching back to LCPLL failed\n");
6144
	}
6669
	}
6145
 
6670
 
6146
	gen6_gt_force_wake_put(dev_priv);
6671
	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
6147
}
6672
}
6148
 
6673
 
6149
void hsw_enable_pc8_work(struct work_struct *__work)
6674
void hsw_enable_pc8_work(struct work_struct *__work)
6150
{
6675
{
6151
	struct drm_i915_private *dev_priv =
6676
	struct drm_i915_private *dev_priv =
6152
		container_of(to_delayed_work(__work), struct drm_i915_private,
6677
		container_of(to_delayed_work(__work), struct drm_i915_private,
6153
			     pc8.enable_work);
6678
			     pc8.enable_work);
6154
	struct drm_device *dev = dev_priv->dev;
6679
	struct drm_device *dev = dev_priv->dev;
6155
	uint32_t val;
6680
	uint32_t val;
-
 
6681
 
-
 
6682
	WARN_ON(!HAS_PC8(dev));
6156
 
6683
 
6157
	if (dev_priv->pc8.enabled)
6684
	if (dev_priv->pc8.enabled)
6158
		return;
6685
		return;
6159
 
6686
 
6160
	DRM_DEBUG_KMS("Enabling package C8+\n");
6687
	DRM_DEBUG_KMS("Enabling package C8+\n");
6161
 
6688
 
6162
	dev_priv->pc8.enabled = true;
6689
	dev_priv->pc8.enabled = true;
6163
 
6690
 
6164
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
6691
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
6165
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
6692
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
6166
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
6693
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
6167
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
6694
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
6168
	}
6695
	}
6169
 
6696
 
6170
	lpt_disable_clkout_dp(dev);
6697
	lpt_disable_clkout_dp(dev);
6171
	hsw_pc8_disable_interrupts(dev);
6698
	hsw_pc8_disable_interrupts(dev);
6172
	hsw_disable_lcpll(dev_priv, true, true);
6699
	hsw_disable_lcpll(dev_priv, true, true);
-
 
6700
 
-
 
6701
	intel_runtime_pm_put(dev_priv);
6173
}
6702
}
6174
 
6703
 
6175
static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv)
6704
static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv)
6176
{
6705
{
6177
	WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
6706
	WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
6178
	WARN(dev_priv->pc8.disable_count < 1,
6707
	WARN(dev_priv->pc8.disable_count < 1,
6179
	     "pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
6708
	     "pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
6180
 
6709
 
6181
	dev_priv->pc8.disable_count--;
6710
	dev_priv->pc8.disable_count--;
6182
	if (dev_priv->pc8.disable_count != 0)
6711
	if (dev_priv->pc8.disable_count != 0)
6183
		return;
6712
		return;
6184
 
6713
 
6185
	schedule_delayed_work(&dev_priv->pc8.enable_work,
6714
	schedule_delayed_work(&dev_priv->pc8.enable_work,
6186
			      msecs_to_jiffies(i915_pc8_timeout));
6715
			      msecs_to_jiffies(i915_pc8_timeout));
6187
}
6716
}
6188
 
6717
 
6189
static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
6718
static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
6190
{
6719
{
6191
	struct drm_device *dev = dev_priv->dev;
6720
	struct drm_device *dev = dev_priv->dev;
6192
	uint32_t val;
6721
	uint32_t val;
6193
 
6722
 
6194
	WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
6723
	WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
6195
	WARN(dev_priv->pc8.disable_count < 0,
6724
	WARN(dev_priv->pc8.disable_count < 0,
6196
	     "pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
6725
	     "pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
6197
 
6726
 
6198
	dev_priv->pc8.disable_count++;
6727
	dev_priv->pc8.disable_count++;
6199
	if (dev_priv->pc8.disable_count != 1)
6728
	if (dev_priv->pc8.disable_count != 1)
6200
		return;
6729
		return;
-
 
6730
 
-
 
6731
	WARN_ON(!HAS_PC8(dev));
6201
 
6732
 
6202
	cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
6733
	cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
6203
	if (!dev_priv->pc8.enabled)
6734
	if (!dev_priv->pc8.enabled)
6204
		return;
6735
		return;
6205
 
6736
 
6206
	DRM_DEBUG_KMS("Disabling package C8+\n");
6737
	DRM_DEBUG_KMS("Disabling package C8+\n");
-
 
6738
 
-
 
6739
	intel_runtime_pm_get(dev_priv);
6207
 
6740
 
6208
	hsw_restore_lcpll(dev_priv);
6741
	hsw_restore_lcpll(dev_priv);
6209
	hsw_pc8_restore_interrupts(dev);
6742
	hsw_pc8_restore_interrupts(dev);
6210
	lpt_init_pch_refclk(dev);
6743
	lpt_init_pch_refclk(dev);
6211
 
6744
 
6212
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
6745
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
6213
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
6746
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
6214
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
6747
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
6215
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
6748
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
6216
	}
6749
	}
6217
 
6750
 
6218
	intel_prepare_ddi(dev);
6751
	intel_prepare_ddi(dev);
6219
	i915_gem_init_swizzling(dev);
6752
	i915_gem_init_swizzling(dev);
6220
	mutex_lock(&dev_priv->rps.hw_lock);
6753
	mutex_lock(&dev_priv->rps.hw_lock);
6221
	gen6_update_ring_freq(dev);
6754
	gen6_update_ring_freq(dev);
6222
	mutex_unlock(&dev_priv->rps.hw_lock);
6755
	mutex_unlock(&dev_priv->rps.hw_lock);
6223
	dev_priv->pc8.enabled = false;
6756
	dev_priv->pc8.enabled = false;
6224
}
6757
}
6225
 
6758
 
6226
void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
6759
void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
6227
{
6760
{
-
 
6761
	if (!HAS_PC8(dev_priv->dev))
-
 
6762
		return;
-
 
6763
 
6228
	mutex_lock(&dev_priv->pc8.lock);
6764
	mutex_lock(&dev_priv->pc8.lock);
6229
	__hsw_enable_package_c8(dev_priv);
6765
	__hsw_enable_package_c8(dev_priv);
6230
	mutex_unlock(&dev_priv->pc8.lock);
6766
	mutex_unlock(&dev_priv->pc8.lock);
6231
}
6767
}
6232
 
6768
 
6233
void hsw_disable_package_c8(struct drm_i915_private *dev_priv)
6769
void hsw_disable_package_c8(struct drm_i915_private *dev_priv)
6234
{
6770
{
-
 
6771
	if (!HAS_PC8(dev_priv->dev))
-
 
6772
		return;
-
 
6773
 
6235
	mutex_lock(&dev_priv->pc8.lock);
6774
	mutex_lock(&dev_priv->pc8.lock);
6236
	__hsw_disable_package_c8(dev_priv);
6775
	__hsw_disable_package_c8(dev_priv);
6237
	mutex_unlock(&dev_priv->pc8.lock);
6776
	mutex_unlock(&dev_priv->pc8.lock);
6238
}
6777
}
6239
 
6778
 
6240
static bool hsw_can_enable_package_c8(struct drm_i915_private *dev_priv)
6779
static bool hsw_can_enable_package_c8(struct drm_i915_private *dev_priv)
6241
{
6780
{
6242
	struct drm_device *dev = dev_priv->dev;
6781
	struct drm_device *dev = dev_priv->dev;
6243
	struct intel_crtc *crtc;
6782
	struct intel_crtc *crtc;
6244
	uint32_t val;
6783
	uint32_t val;
6245
 
6784
 
6246
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
6785
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
6247
		if (crtc->base.enabled)
6786
		if (crtc->base.enabled)
6248
			return false;
6787
			return false;
6249
 
6788
 
6250
	/* This case is still possible since we have the i915.disable_power_well
6789
	/* This case is still possible since we have the i915.disable_power_well
6251
	 * parameter and also the KVMr or something else might be requesting the
6790
	 * parameter and also the KVMr or something else might be requesting the
6252
	 * power well. */
6791
	 * power well. */
6253
	val = I915_READ(HSW_PWR_WELL_DRIVER);
6792
	val = I915_READ(HSW_PWR_WELL_DRIVER);
6254
	if (val != 0) {
6793
	if (val != 0) {
6255
		DRM_DEBUG_KMS("Not enabling PC8: power well on\n");
6794
		DRM_DEBUG_KMS("Not enabling PC8: power well on\n");
6256
		return false;
6795
		return false;
6257
	}
6796
	}
6258
 
6797
 
6259
	return true;
6798
	return true;
6260
}
6799
}
6261
 
6800
 
6262
/* Since we're called from modeset_global_resources there's no way to
6801
/* Since we're called from modeset_global_resources there's no way to
6263
 * symmetrically increase and decrease the refcount, so we use
6802
 * symmetrically increase and decrease the refcount, so we use
6264
 * dev_priv->pc8.requirements_met to track whether we already have the refcount
6803
 * dev_priv->pc8.requirements_met to track whether we already have the refcount
6265
 * or not.
6804
 * or not.
6266
 */
6805
 */
6267
static void hsw_update_package_c8(struct drm_device *dev)
6806
static void hsw_update_package_c8(struct drm_device *dev)
6268
{
6807
{
6269
	struct drm_i915_private *dev_priv = dev->dev_private;
6808
	struct drm_i915_private *dev_priv = dev->dev_private;
6270
	bool allow;
6809
	bool allow;
-
 
6810
 
-
 
6811
	if (!HAS_PC8(dev_priv->dev))
-
 
6812
		return;
6271
 
6813
 
6272
	if (!i915_enable_pc8)
6814
	if (!i915_enable_pc8)
6273
		return;
6815
		return;
6274
 
6816
 
6275
	mutex_lock(&dev_priv->pc8.lock);
6817
	mutex_lock(&dev_priv->pc8.lock);
6276
 
6818
 
6277
	allow = hsw_can_enable_package_c8(dev_priv);
6819
	allow = hsw_can_enable_package_c8(dev_priv);
6278
 
6820
 
6279
	if (allow == dev_priv->pc8.requirements_met)
6821
	if (allow == dev_priv->pc8.requirements_met)
6280
		goto done;
6822
		goto done;
6281
 
6823
 
6282
	dev_priv->pc8.requirements_met = allow;
6824
	dev_priv->pc8.requirements_met = allow;
6283
 
6825
 
6284
	if (allow)
6826
	if (allow)
6285
		__hsw_enable_package_c8(dev_priv);
6827
		__hsw_enable_package_c8(dev_priv);
6286
	else
6828
	else
6287
		__hsw_disable_package_c8(dev_priv);
6829
		__hsw_disable_package_c8(dev_priv);
6288
 
6830
 
6289
done:
6831
done:
6290
	mutex_unlock(&dev_priv->pc8.lock);
6832
	mutex_unlock(&dev_priv->pc8.lock);
6291
}
6833
}
6292
 
6834
 
6293
static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
6835
static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
6294
{
6836
{
-
 
6837
	if (!HAS_PC8(dev_priv->dev))
-
 
6838
		return;
-
 
6839
 
-
 
6840
	mutex_lock(&dev_priv->pc8.lock);
6295
	if (!dev_priv->pc8.gpu_idle) {
6841
	if (!dev_priv->pc8.gpu_idle) {
6296
		dev_priv->pc8.gpu_idle = true;
6842
		dev_priv->pc8.gpu_idle = true;
6297
		hsw_enable_package_c8(dev_priv);
6843
		__hsw_enable_package_c8(dev_priv);
6298
	}
6844
	}
-
 
6845
	mutex_unlock(&dev_priv->pc8.lock);
6299
}
6846
}
6300
 
6847
 
6301
static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
6848
static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
6302
{
6849
{
-
 
6850
	if (!HAS_PC8(dev_priv->dev))
-
 
6851
		return;
-
 
6852
 
-
 
6853
	mutex_lock(&dev_priv->pc8.lock);
6303
	if (dev_priv->pc8.gpu_idle) {
6854
	if (dev_priv->pc8.gpu_idle) {
6304
		dev_priv->pc8.gpu_idle = false;
6855
		dev_priv->pc8.gpu_idle = false;
6305
		hsw_disable_package_c8(dev_priv);
6856
		__hsw_disable_package_c8(dev_priv);
6306
	}
6857
	}
-
 
6858
	mutex_unlock(&dev_priv->pc8.lock);
6307
}
6859
}
-
 
6860
 
-
 
6861
#define for_each_power_domain(domain, mask)				\
-
 
6862
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
-
 
6863
		if ((1 << (domain)) & (mask))
6308
 
6864
 
-
 
6865
static unsigned long get_pipe_power_domains(struct drm_device *dev,
6309
static void haswell_modeset_global_resources(struct drm_device *dev)
6866
					    enum pipe pipe, bool pfit_enabled)
6310
{
6867
{
-
 
6868
	unsigned long mask;
-
 
6869
	enum transcoder transcoder;
-
 
6870
 
-
 
6871
	transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
-
 
6872
 
-
 
6873
	mask = BIT(POWER_DOMAIN_PIPE(pipe));
-
 
6874
	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
-
 
6875
	if (pfit_enabled)
-
 
6876
		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
-
 
6877
 
-
 
6878
	return mask;
-
 
6879
}
-
 
6880
 
-
 
6881
void intel_display_set_init_power(struct drm_device *dev, bool enable)
-
 
6882
{
-
 
6883
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
6884
 
-
 
6885
	if (dev_priv->power_domains.init_power_on == enable)
-
 
6886
		return;
-
 
6887
 
-
 
6888
	if (enable)
-
 
6889
		intel_display_power_get(dev, POWER_DOMAIN_INIT);
-
 
6890
	else
-
 
6891
		intel_display_power_put(dev, POWER_DOMAIN_INIT);
-
 
6892
 
-
 
6893
	dev_priv->power_domains.init_power_on = enable;
-
 
6894
}
-
 
6895
 
-
 
6896
static void modeset_update_power_wells(struct drm_device *dev)
-
 
6897
{
6311
	bool enable = false;
6898
	unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
-
 
6899
	struct intel_crtc *crtc;
-
 
6900
 
-
 
6901
	/*
-
 
6902
	 * First get all needed power domains, then put all unneeded, to avoid
6312
	struct intel_crtc *crtc;
6903
	 * any unnecessary toggling of the power wells.
-
 
6904
	 */
-
 
6905
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
6313
 
6906
		enum intel_display_power_domain domain;
6314
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
6907
 
6315
		if (!crtc->base.enabled)
6908
		if (!crtc->base.enabled)
6316
			continue;
6909
			continue;
-
 
6910
 
-
 
6911
		pipe_domains[crtc->pipe] = get_pipe_power_domains(dev,
6317
 
6912
						crtc->pipe,
-
 
6913
						crtc->config.pch_pfit.enabled);
6318
		if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.enabled ||
6914
 
6319
		    crtc->config.cpu_transcoder != TRANSCODER_EDP)
6915
		for_each_power_domain(domain, pipe_domains[crtc->pipe])
6320
			enable = true;
6916
			intel_display_power_get(dev, domain);
-
 
6917
	}
-
 
6918
 
-
 
6919
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
-
 
6920
		enum intel_display_power_domain domain;
6321
	}
6921
 
-
 
6922
		for_each_power_domain(domain, crtc->enabled_power_domains)
-
 
6923
			intel_display_power_put(dev, domain);
-
 
6924
 
-
 
6925
		crtc->enabled_power_domains = pipe_domains[crtc->pipe];
-
 
6926
	}
-
 
6927
 
-
 
6928
	intel_display_set_init_power(dev, false);
-
 
6929
}
-
 
6930
 
6322
 
6931
static void haswell_modeset_global_resources(struct drm_device *dev)
6323
	intel_set_power_well(dev, enable);
6932
{
6324
 
6933
	modeset_update_power_wells(dev);
6325
	hsw_update_package_c8(dev);
6934
	hsw_update_package_c8(dev);
6326
}
6935
}
6327
 
6936
 
6328
static int haswell_crtc_mode_set(struct drm_crtc *crtc,
6937
static int haswell_crtc_mode_set(struct drm_crtc *crtc,
6329
				 int x, int y,
6938
				 int x, int y,
6330
				 struct drm_framebuffer *fb)
6939
				 struct drm_framebuffer *fb)
6331
{
6940
{
6332
	struct drm_device *dev = crtc->dev;
6941
	struct drm_device *dev = crtc->dev;
6333
	struct drm_i915_private *dev_priv = dev->dev_private;
6942
	struct drm_i915_private *dev_priv = dev->dev_private;
6334
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6943
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6335
	int plane = intel_crtc->plane;
6944
	int plane = intel_crtc->plane;
6336
	int ret;
6945
	int ret;
6337
 
6946
 
6338
	if (!intel_ddi_pll_mode_set(crtc))
6947
	if (!intel_ddi_pll_select(intel_crtc))
-
 
6948
		return -EINVAL;
6339
		return -EINVAL;
6949
	intel_ddi_pll_enable(intel_crtc);
6340
 
6950
 
6341
	if (intel_crtc->config.has_dp_encoder)
6951
	if (intel_crtc->config.has_dp_encoder)
6342
		intel_dp_set_m_n(intel_crtc);
6952
		intel_dp_set_m_n(intel_crtc);
6343
 
6953
 
6344
	intel_crtc->lowfreq_avail = false;
6954
	intel_crtc->lowfreq_avail = false;
6345
 
6955
 
6346
	intel_set_pipe_timings(intel_crtc);
6956
	intel_set_pipe_timings(intel_crtc);
6347
 
6957
 
6348
	if (intel_crtc->config.has_pch_encoder) {
6958
	if (intel_crtc->config.has_pch_encoder) {
6349
		intel_cpu_transcoder_set_m_n(intel_crtc,
6959
		intel_cpu_transcoder_set_m_n(intel_crtc,
6350
					     &intel_crtc->config.fdi_m_n);
6960
					     &intel_crtc->config.fdi_m_n);
6351
	}
6961
	}
6352
 
6962
 
6353
	haswell_set_pipeconf(crtc);
6963
	haswell_set_pipeconf(crtc);
6354
 
6964
 
6355
	intel_set_pipe_csc(crtc);
6965
	intel_set_pipe_csc(crtc);
6356
 
6966
 
6357
	/* Set up the display plane register */
6967
	/* Set up the display plane register */
6358
	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
6968
	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
6359
    POSTING_READ(DSPCNTR(plane));
6969
    POSTING_READ(DSPCNTR(plane));
6360
 
6970
 
6361
	ret = intel_pipe_set_base(crtc, x, y, fb);
6971
	ret = intel_pipe_set_base(crtc, x, y, fb);
6362
 
-
 
6363
    intel_update_watermarks(dev);
-
 
6364
 
6972
 
6365
    return ret;
6973
    return ret;
6366
}
6974
}
6367
 
6975
 
6368
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
6976
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
6369
				    struct intel_crtc_config *pipe_config)
6977
				    struct intel_crtc_config *pipe_config)
6370
{
6978
{
6371
	struct drm_device *dev = crtc->base.dev;
6979
	struct drm_device *dev = crtc->base.dev;
6372
	struct drm_i915_private *dev_priv = dev->dev_private;
6980
	struct drm_i915_private *dev_priv = dev->dev_private;
6373
	enum intel_display_power_domain pfit_domain;
6981
	enum intel_display_power_domain pfit_domain;
6374
	uint32_t tmp;
6982
	uint32_t tmp;
6375
 
6983
 
6376
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
6984
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
6377
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
6985
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
6378
 
6986
 
6379
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
6987
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
6380
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
6988
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
6381
		enum pipe trans_edp_pipe;
6989
		enum pipe trans_edp_pipe;
6382
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
6990
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
6383
		default:
6991
		default:
6384
			WARN(1, "unknown pipe linked to edp transcoder\n");
6992
			WARN(1, "unknown pipe linked to edp transcoder\n");
6385
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
6993
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
6386
		case TRANS_DDI_EDP_INPUT_A_ON:
6994
		case TRANS_DDI_EDP_INPUT_A_ON:
6387
			trans_edp_pipe = PIPE_A;
6995
			trans_edp_pipe = PIPE_A;
6388
			break;
6996
			break;
6389
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
6997
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
6390
			trans_edp_pipe = PIPE_B;
6998
			trans_edp_pipe = PIPE_B;
6391
			break;
6999
			break;
6392
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
7000
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
6393
			trans_edp_pipe = PIPE_C;
7001
			trans_edp_pipe = PIPE_C;
6394
			break;
7002
			break;
6395
		}
7003
		}
6396
 
7004
 
6397
		if (trans_edp_pipe == crtc->pipe)
7005
		if (trans_edp_pipe == crtc->pipe)
6398
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
7006
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
6399
	}
7007
	}
6400
 
7008
 
6401
	if (!intel_display_power_enabled(dev,
7009
	if (!intel_display_power_enabled(dev,
6402
			POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
7010
			POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
6403
		return false;
7011
		return false;
6404
 
7012
 
6405
	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
7013
	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
6406
	if (!(tmp & PIPECONF_ENABLE))
7014
	if (!(tmp & PIPECONF_ENABLE))
6407
		return false;
7015
		return false;
6408
 
7016
 
6409
	/*
7017
	/*
6410
	 * Haswell has only FDI/PCH transcoder A. It is which is connected to
7018
	 * Haswell has only FDI/PCH transcoder A. It is which is connected to
6411
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
7019
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
6412
	 * the PCH transcoder is on.
7020
	 * the PCH transcoder is on.
6413
	 */
7021
	 */
6414
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
7022
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
6415
	if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
7023
	if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
6416
	    I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
7024
	    I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
6417
		pipe_config->has_pch_encoder = true;
7025
		pipe_config->has_pch_encoder = true;
6418
 
7026
 
6419
		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
7027
		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
6420
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
7028
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
6421
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
7029
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
6422
 
7030
 
6423
		ironlake_get_fdi_m_n_config(crtc, pipe_config);
7031
		ironlake_get_fdi_m_n_config(crtc, pipe_config);
6424
	}
7032
	}
6425
 
7033
 
6426
	intel_get_pipe_timings(crtc, pipe_config);
7034
	intel_get_pipe_timings(crtc, pipe_config);
6427
 
7035
 
6428
	pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
7036
	pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
6429
	if (intel_display_power_enabled(dev, pfit_domain))
7037
	if (intel_display_power_enabled(dev, pfit_domain))
6430
		ironlake_get_pfit_config(crtc, pipe_config);
7038
		ironlake_get_pfit_config(crtc, pipe_config);
-
 
7039
 
6431
 
7040
	if (IS_HASWELL(dev))
6432
	pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
7041
	pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
6433
				   (I915_READ(IPS_CTL) & IPS_ENABLE);
7042
				   (I915_READ(IPS_CTL) & IPS_ENABLE);
6434
 
7043
 
6435
	pipe_config->pixel_multiplier = 1;
7044
	pipe_config->pixel_multiplier = 1;
6436
 
7045
 
6437
	return true;
7046
	return true;
6438
}
7047
}
6439
 
7048
 
6440
static int intel_crtc_mode_set(struct drm_crtc *crtc,
7049
static int intel_crtc_mode_set(struct drm_crtc *crtc,
6441
			       int x, int y,
7050
			       int x, int y,
6442
			       struct drm_framebuffer *fb)
7051
			       struct drm_framebuffer *fb)
6443
{
7052
{
6444
	struct drm_device *dev = crtc->dev;
7053
	struct drm_device *dev = crtc->dev;
6445
	struct drm_i915_private *dev_priv = dev->dev_private;
7054
	struct drm_i915_private *dev_priv = dev->dev_private;
6446
	struct intel_encoder *encoder;
7055
	struct intel_encoder *encoder;
6447
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7056
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6448
	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
7057
	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
6449
	int pipe = intel_crtc->pipe;
7058
	int pipe = intel_crtc->pipe;
6450
	int ret;
7059
	int ret;
6451
 
7060
 
6452
	drm_vblank_pre_modeset(dev, pipe);
7061
	drm_vblank_pre_modeset(dev, pipe);
6453
 
7062
 
6454
	ret = dev_priv->display.crtc_mode_set(crtc, x, y, fb);
7063
	ret = dev_priv->display.crtc_mode_set(crtc, x, y, fb);
6455
 
7064
 
6456
	drm_vblank_post_modeset(dev, pipe);
7065
	drm_vblank_post_modeset(dev, pipe);
6457
 
7066
 
6458
	if (ret != 0)
7067
	if (ret != 0)
6459
	return ret;
7068
	return ret;
6460
 
7069
 
6461
	for_each_encoder_on_crtc(dev, crtc, encoder) {
7070
	for_each_encoder_on_crtc(dev, crtc, encoder) {
6462
		DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
7071
		DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
6463
			encoder->base.base.id,
7072
			encoder->base.base.id,
6464
			drm_get_encoder_name(&encoder->base),
7073
			drm_get_encoder_name(&encoder->base),
6465
			mode->base.id, mode->name);
7074
			mode->base.id, mode->name);
6466
			encoder->mode_set(encoder);
7075
			encoder->mode_set(encoder);
6467
	}
7076
	}
6468
 
7077
 
6469
	return 0;
7078
	return 0;
6470
}
7079
}
-
 
7080
 
-
 
7081
static struct {
-
 
7082
	int clock;
-
 
7083
	u32 config;
-
 
7084
} hdmi_audio_clock[] = {
-
 
7085
	{ DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
-
 
7086
	{ 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
-
 
7087
	{ 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
-
 
7088
	{ 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
-
 
7089
	{ 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
-
 
7090
	{ 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
-
 
7091
	{ DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
-
 
7092
	{ 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
-
 
7093
	{ DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
-
 
7094
	{ 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
-
 
7095
};
-
 
7096
 
-
 
7097
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
-
 
7098
static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
-
 
7099
{
-
 
7100
	int i;
-
 
7101
 
-
 
7102
	for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
-
 
7103
		if (mode->clock == hdmi_audio_clock[i].clock)
-
 
7104
			break;
-
 
7105
	}
-
 
7106
 
-
 
7107
	if (i == ARRAY_SIZE(hdmi_audio_clock)) {
-
 
7108
		DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
-
 
7109
		i = 1;
-
 
7110
	}
-
 
7111
 
-
 
7112
	DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
-
 
7113
		      hdmi_audio_clock[i].clock,
-
 
7114
		      hdmi_audio_clock[i].config);
-
 
7115
 
-
 
7116
	return hdmi_audio_clock[i].config;
-
 
7117
}
6471
 
7118
 
6472
static bool intel_eld_uptodate(struct drm_connector *connector,
7119
static bool intel_eld_uptodate(struct drm_connector *connector,
6473
			       int reg_eldv, uint32_t bits_eldv,
7120
			       int reg_eldv, uint32_t bits_eldv,
6474
			       int reg_elda, uint32_t bits_elda,
7121
			       int reg_elda, uint32_t bits_elda,
6475
			       int reg_edid)
7122
			       int reg_edid)
6476
{
7123
{
6477
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
7124
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
6478
	uint8_t *eld = connector->eld;
7125
	uint8_t *eld = connector->eld;
6479
	uint32_t i;
7126
	uint32_t i;
6480
 
7127
 
6481
	i = I915_READ(reg_eldv);
7128
	i = I915_READ(reg_eldv);
6482
	i &= bits_eldv;
7129
	i &= bits_eldv;
6483
 
7130
 
6484
	if (!eld[0])
7131
	if (!eld[0])
6485
		return !i;
7132
		return !i;
6486
 
7133
 
6487
	if (!i)
7134
	if (!i)
6488
		return false;
7135
		return false;
6489
 
7136
 
6490
	i = I915_READ(reg_elda);
7137
	i = I915_READ(reg_elda);
6491
	i &= ~bits_elda;
7138
	i &= ~bits_elda;
6492
	I915_WRITE(reg_elda, i);
7139
	I915_WRITE(reg_elda, i);
6493
 
7140
 
6494
	for (i = 0; i < eld[2]; i++)
7141
	for (i = 0; i < eld[2]; i++)
6495
		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
7142
		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
6496
			return false;
7143
			return false;
6497
 
7144
 
6498
	return true;
7145
	return true;
6499
}
7146
}
6500
 
7147
 
6501
static void g4x_write_eld(struct drm_connector *connector,
7148
static void g4x_write_eld(struct drm_connector *connector,
6502
			  struct drm_crtc *crtc)
7149
			  struct drm_crtc *crtc,
-
 
7150
			  struct drm_display_mode *mode)
6503
{
7151
{
6504
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
7152
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
6505
	uint8_t *eld = connector->eld;
7153
	uint8_t *eld = connector->eld;
6506
	uint32_t eldv;
7154
	uint32_t eldv;
6507
	uint32_t len;
7155
	uint32_t len;
6508
	uint32_t i;
7156
	uint32_t i;
6509
 
7157
 
6510
	i = I915_READ(G4X_AUD_VID_DID);
7158
	i = I915_READ(G4X_AUD_VID_DID);
6511
 
7159
 
6512
	if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
7160
	if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
6513
		eldv = G4X_ELDV_DEVCL_DEVBLC;
7161
		eldv = G4X_ELDV_DEVCL_DEVBLC;
6514
	else
7162
	else
6515
		eldv = G4X_ELDV_DEVCTG;
7163
		eldv = G4X_ELDV_DEVCTG;
6516
 
7164
 
6517
	if (intel_eld_uptodate(connector,
7165
	if (intel_eld_uptodate(connector,
6518
			       G4X_AUD_CNTL_ST, eldv,
7166
			       G4X_AUD_CNTL_ST, eldv,
6519
			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
7167
			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
6520
			       G4X_HDMIW_HDMIEDID))
7168
			       G4X_HDMIW_HDMIEDID))
6521
		return;
7169
		return;
6522
 
7170
 
6523
	i = I915_READ(G4X_AUD_CNTL_ST);
7171
	i = I915_READ(G4X_AUD_CNTL_ST);
6524
	i &= ~(eldv | G4X_ELD_ADDR);
7172
	i &= ~(eldv | G4X_ELD_ADDR);
6525
	len = (i >> 9) & 0x1f;		/* ELD buffer size */
7173
	len = (i >> 9) & 0x1f;		/* ELD buffer size */
6526
	I915_WRITE(G4X_AUD_CNTL_ST, i);
7174
	I915_WRITE(G4X_AUD_CNTL_ST, i);
6527
 
7175
 
6528
	if (!eld[0])
7176
	if (!eld[0])
6529
		return;
7177
		return;
6530
 
7178
 
6531
	len = min_t(uint8_t, eld[2], len);
7179
	len = min_t(uint8_t, eld[2], len);
6532
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
7180
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
6533
	for (i = 0; i < len; i++)
7181
	for (i = 0; i < len; i++)
6534
		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
7182
		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
6535
 
7183
 
6536
	i = I915_READ(G4X_AUD_CNTL_ST);
7184
	i = I915_READ(G4X_AUD_CNTL_ST);
6537
	i |= eldv;
7185
	i |= eldv;
6538
	I915_WRITE(G4X_AUD_CNTL_ST, i);
7186
	I915_WRITE(G4X_AUD_CNTL_ST, i);
6539
}
7187
}
6540
 
7188
 
6541
static void haswell_write_eld(struct drm_connector *connector,
7189
static void haswell_write_eld(struct drm_connector *connector,
6542
				     struct drm_crtc *crtc)
7190
			      struct drm_crtc *crtc,
-
 
7191
			      struct drm_display_mode *mode)
6543
{
7192
{
6544
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
7193
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
6545
	uint8_t *eld = connector->eld;
7194
	uint8_t *eld = connector->eld;
6546
	struct drm_device *dev = crtc->dev;
7195
	struct drm_device *dev = crtc->dev;
6547
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7196
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6548
	uint32_t eldv;
7197
	uint32_t eldv;
6549
	uint32_t i;
7198
	uint32_t i;
6550
	int len;
7199
	int len;
6551
	int pipe = to_intel_crtc(crtc)->pipe;
7200
	int pipe = to_intel_crtc(crtc)->pipe;
6552
	int tmp;
7201
	int tmp;
6553
 
7202
 
6554
	int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
7203
	int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
6555
	int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
7204
	int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
6556
	int aud_config = HSW_AUD_CFG(pipe);
7205
	int aud_config = HSW_AUD_CFG(pipe);
6557
	int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
7206
	int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
6558
 
7207
 
6559
 
7208
 
6560
	DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n");
7209
	DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n");
6561
 
7210
 
6562
	/* Audio output enable */
7211
	/* Audio output enable */
6563
	DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
7212
	DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
6564
	tmp = I915_READ(aud_cntrl_st2);
7213
	tmp = I915_READ(aud_cntrl_st2);
6565
	tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
7214
	tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
6566
	I915_WRITE(aud_cntrl_st2, tmp);
7215
	I915_WRITE(aud_cntrl_st2, tmp);
6567
 
7216
 
6568
	/* Wait for 1 vertical blank */
7217
	/* Wait for 1 vertical blank */
6569
	intel_wait_for_vblank(dev, pipe);
7218
	intel_wait_for_vblank(dev, pipe);
6570
 
7219
 
6571
	/* Set ELD valid state */
7220
	/* Set ELD valid state */
6572
	tmp = I915_READ(aud_cntrl_st2);
7221
	tmp = I915_READ(aud_cntrl_st2);
6573
	DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
7222
	DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
6574
	tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
7223
	tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
6575
	I915_WRITE(aud_cntrl_st2, tmp);
7224
	I915_WRITE(aud_cntrl_st2, tmp);
6576
	tmp = I915_READ(aud_cntrl_st2);
7225
	tmp = I915_READ(aud_cntrl_st2);
6577
	DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);
7226
	DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);
6578
 
7227
 
6579
	/* Enable HDMI mode */
7228
	/* Enable HDMI mode */
6580
	tmp = I915_READ(aud_config);
7229
	tmp = I915_READ(aud_config);
6581
	DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
7230
	DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
6582
	/* clear N_programing_enable and N_value_index */
7231
	/* clear N_programing_enable and N_value_index */
6583
	tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
7232
	tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
6584
	I915_WRITE(aud_config, tmp);
7233
	I915_WRITE(aud_config, tmp);
6585
 
7234
 
6586
	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
7235
	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
6587
 
7236
 
6588
	eldv = AUDIO_ELD_VALID_A << (pipe * 4);
7237
	eldv = AUDIO_ELD_VALID_A << (pipe * 4);
6589
	intel_crtc->eld_vld = true;
7238
	intel_crtc->eld_vld = true;
6590
 
7239
 
6591
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
7240
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
6592
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
7241
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
6593
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
7242
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
6594
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
7243
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
6595
	} else
7244
	} else {
6596
		I915_WRITE(aud_config, 0);
7245
		I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
-
 
7246
	}
6597
 
7247
 
6598
	if (intel_eld_uptodate(connector,
7248
	if (intel_eld_uptodate(connector,
6599
			       aud_cntrl_st2, eldv,
7249
			       aud_cntrl_st2, eldv,
6600
			       aud_cntl_st, IBX_ELD_ADDRESS,
7250
			       aud_cntl_st, IBX_ELD_ADDRESS,
6601
			       hdmiw_hdmiedid))
7251
			       hdmiw_hdmiedid))
6602
		return;
7252
		return;
6603
 
7253
 
6604
	i = I915_READ(aud_cntrl_st2);
7254
	i = I915_READ(aud_cntrl_st2);
6605
	i &= ~eldv;
7255
	i &= ~eldv;
6606
	I915_WRITE(aud_cntrl_st2, i);
7256
	I915_WRITE(aud_cntrl_st2, i);
6607
 
7257
 
6608
	if (!eld[0])
7258
	if (!eld[0])
6609
		return;
7259
		return;
6610
 
7260
 
6611
	i = I915_READ(aud_cntl_st);
7261
	i = I915_READ(aud_cntl_st);
6612
	i &= ~IBX_ELD_ADDRESS;
7262
	i &= ~IBX_ELD_ADDRESS;
6613
	I915_WRITE(aud_cntl_st, i);
7263
	I915_WRITE(aud_cntl_st, i);
6614
	i = (i >> 29) & DIP_PORT_SEL_MASK;		/* DIP_Port_Select, 0x1 = PortB */
7264
	i = (i >> 29) & DIP_PORT_SEL_MASK;		/* DIP_Port_Select, 0x1 = PortB */
6615
	DRM_DEBUG_DRIVER("port num:%d\n", i);
7265
	DRM_DEBUG_DRIVER("port num:%d\n", i);
6616
 
7266
 
6617
	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
7267
	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
6618
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
7268
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
6619
	for (i = 0; i < len; i++)
7269
	for (i = 0; i < len; i++)
6620
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
7270
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
6621
 
7271
 
6622
	i = I915_READ(aud_cntrl_st2);
7272
	i = I915_READ(aud_cntrl_st2);
6623
	i |= eldv;
7273
	i |= eldv;
6624
	I915_WRITE(aud_cntrl_st2, i);
7274
	I915_WRITE(aud_cntrl_st2, i);
6625
 
7275
 
6626
}
7276
}
6627
 
7277
 
6628
static void ironlake_write_eld(struct drm_connector *connector,
7278
static void ironlake_write_eld(struct drm_connector *connector,
6629
				     struct drm_crtc *crtc)
7279
			       struct drm_crtc *crtc,
-
 
7280
			       struct drm_display_mode *mode)
6630
{
7281
{
6631
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
7282
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
6632
	uint8_t *eld = connector->eld;
7283
	uint8_t *eld = connector->eld;
6633
	uint32_t eldv;
7284
	uint32_t eldv;
6634
	uint32_t i;
7285
	uint32_t i;
6635
	int len;
7286
	int len;
6636
	int hdmiw_hdmiedid;
7287
	int hdmiw_hdmiedid;
6637
	int aud_config;
7288
	int aud_config;
6638
	int aud_cntl_st;
7289
	int aud_cntl_st;
6639
	int aud_cntrl_st2;
7290
	int aud_cntrl_st2;
6640
	int pipe = to_intel_crtc(crtc)->pipe;
7291
	int pipe = to_intel_crtc(crtc)->pipe;
6641
 
7292
 
6642
	if (HAS_PCH_IBX(connector->dev)) {
7293
	if (HAS_PCH_IBX(connector->dev)) {
6643
		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
7294
		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
6644
		aud_config = IBX_AUD_CFG(pipe);
7295
		aud_config = IBX_AUD_CFG(pipe);
6645
		aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
7296
		aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
6646
		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
7297
		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
-
 
7298
	} else if (IS_VALLEYVIEW(connector->dev)) {
-
 
7299
		hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
-
 
7300
		aud_config = VLV_AUD_CFG(pipe);
-
 
7301
		aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
-
 
7302
		aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
6647
	} else {
7303
	} else {
6648
		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
7304
		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
6649
		aud_config = CPT_AUD_CFG(pipe);
7305
		aud_config = CPT_AUD_CFG(pipe);
6650
		aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
7306
		aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
6651
		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
7307
		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
6652
	}
7308
	}
6653
 
7309
 
6654
	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
7310
	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
-
 
7311
 
-
 
7312
	if (IS_VALLEYVIEW(connector->dev))  {
-
 
7313
		struct intel_encoder *intel_encoder;
-
 
7314
		struct intel_digital_port *intel_dig_port;
-
 
7315
 
-
 
7316
		intel_encoder = intel_attached_encoder(connector);
-
 
7317
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
-
 
7318
		i = intel_dig_port->port;
6655
 
7319
	} else {
-
 
7320
	i = I915_READ(aud_cntl_st);
6656
	i = I915_READ(aud_cntl_st);
7321
		i = (i >> 29) & DIP_PORT_SEL_MASK;
-
 
7322
		/* DIP_Port_Select, 0x1 = PortB */
-
 
7323
	}
6657
	i = (i >> 29) & DIP_PORT_SEL_MASK;		/* DIP_Port_Select, 0x1 = PortB */
7324
 
6658
	if (!i) {
7325
	if (!i) {
6659
		DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
7326
		DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
6660
		/* operate blindly on all ports */
7327
		/* operate blindly on all ports */
6661
		eldv = IBX_ELD_VALIDB;
7328
		eldv = IBX_ELD_VALIDB;
6662
		eldv |= IBX_ELD_VALIDB << 4;
7329
		eldv |= IBX_ELD_VALIDB << 4;
6663
		eldv |= IBX_ELD_VALIDB << 8;
7330
		eldv |= IBX_ELD_VALIDB << 8;
6664
	} else {
7331
	} else {
6665
		DRM_DEBUG_DRIVER("ELD on port %c\n", port_name(i));
7332
		DRM_DEBUG_DRIVER("ELD on port %c\n", port_name(i));
6666
		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
7333
		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
6667
	}
7334
	}
6668
 
7335
 
6669
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
7336
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
6670
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
7337
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
6671
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
7338
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
6672
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
7339
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
6673
	} else
7340
	} else {
6674
		I915_WRITE(aud_config, 0);
7341
		I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
-
 
7342
	}
6675
 
7343
 
6676
	if (intel_eld_uptodate(connector,
7344
	if (intel_eld_uptodate(connector,
6677
			       aud_cntrl_st2, eldv,
7345
			       aud_cntrl_st2, eldv,
6678
			       aud_cntl_st, IBX_ELD_ADDRESS,
7346
			       aud_cntl_st, IBX_ELD_ADDRESS,
6679
			       hdmiw_hdmiedid))
7347
			       hdmiw_hdmiedid))
6680
		return;
7348
		return;
6681
 
7349
 
6682
	i = I915_READ(aud_cntrl_st2);
7350
	i = I915_READ(aud_cntrl_st2);
6683
	i &= ~eldv;
7351
	i &= ~eldv;
6684
	I915_WRITE(aud_cntrl_st2, i);
7352
	I915_WRITE(aud_cntrl_st2, i);
6685
 
7353
 
6686
	if (!eld[0])
7354
	if (!eld[0])
6687
		return;
7355
		return;
6688
 
7356
 
6689
	i = I915_READ(aud_cntl_st);
7357
	i = I915_READ(aud_cntl_st);
6690
	i &= ~IBX_ELD_ADDRESS;
7358
	i &= ~IBX_ELD_ADDRESS;
6691
	I915_WRITE(aud_cntl_st, i);
7359
	I915_WRITE(aud_cntl_st, i);
6692
 
7360
 
6693
	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
7361
	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
6694
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
7362
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
6695
	for (i = 0; i < len; i++)
7363
	for (i = 0; i < len; i++)
6696
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
7364
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
6697
 
7365
 
6698
	i = I915_READ(aud_cntrl_st2);
7366
	i = I915_READ(aud_cntrl_st2);
6699
	i |= eldv;
7367
	i |= eldv;
6700
	I915_WRITE(aud_cntrl_st2, i);
7368
	I915_WRITE(aud_cntrl_st2, i);
6701
}
7369
}
6702
 
7370
 
6703
void intel_write_eld(struct drm_encoder *encoder,
7371
void intel_write_eld(struct drm_encoder *encoder,
6704
		     struct drm_display_mode *mode)
7372
		     struct drm_display_mode *mode)
6705
{
7373
{
6706
	struct drm_crtc *crtc = encoder->crtc;
7374
	struct drm_crtc *crtc = encoder->crtc;
6707
	struct drm_connector *connector;
7375
	struct drm_connector *connector;
6708
	struct drm_device *dev = encoder->dev;
7376
	struct drm_device *dev = encoder->dev;
6709
	struct drm_i915_private *dev_priv = dev->dev_private;
7377
	struct drm_i915_private *dev_priv = dev->dev_private;
6710
 
7378
 
6711
	connector = drm_select_eld(encoder, mode);
7379
	connector = drm_select_eld(encoder, mode);
6712
	if (!connector)
7380
	if (!connector)
6713
		return;
7381
		return;
6714
 
7382
 
6715
	DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
7383
	DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6716
			 connector->base.id,
7384
			 connector->base.id,
6717
			 drm_get_connector_name(connector),
7385
			 drm_get_connector_name(connector),
6718
			 connector->encoder->base.id,
7386
			 connector->encoder->base.id,
6719
			 drm_get_encoder_name(connector->encoder));
7387
			 drm_get_encoder_name(connector->encoder));
6720
 
7388
 
6721
	connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
7389
	connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
6722
 
7390
 
6723
	if (dev_priv->display.write_eld)
7391
	if (dev_priv->display.write_eld)
6724
		dev_priv->display.write_eld(connector, crtc);
7392
		dev_priv->display.write_eld(connector, crtc, mode);
6725
}
-
 
6726
 
-
 
6727
/** Loads the palette/gamma unit for the CRTC with the prepared values */
-
 
6728
void intel_crtc_load_lut(struct drm_crtc *crtc)
-
 
6729
{
-
 
6730
	struct drm_device *dev = crtc->dev;
-
 
6731
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
6732
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
 
6733
	enum pipe pipe = intel_crtc->pipe;
-
 
6734
	int palreg = PALETTE(pipe);
-
 
6735
	int i;
-
 
6736
	bool reenable_ips = false;
-
 
6737
 
-
 
6738
	/* The clocks have to be on to load the palette. */
-
 
6739
	if (!crtc->enabled || !intel_crtc->active)
-
 
6740
		return;
-
 
6741
 
-
 
6742
	if (!HAS_PCH_SPLIT(dev_priv->dev))
-
 
6743
		assert_pll_enabled(dev_priv, pipe);
-
 
6744
 
-
 
6745
	/* use legacy palette for Ironlake */
-
 
6746
	if (HAS_PCH_SPLIT(dev))
-
 
6747
		palreg = LGC_PALETTE(pipe);
-
 
6748
 
-
 
6749
	/* Workaround : Do not read or write the pipe palette/gamma data while
-
 
6750
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
-
 
6751
	 */
-
 
6752
	if (intel_crtc->config.ips_enabled &&
-
 
6753
	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
-
 
6754
	     GAMMA_MODE_MODE_SPLIT)) {
-
 
6755
		hsw_disable_ips(intel_crtc);
-
 
6756
		reenable_ips = true;
-
 
6757
	}
7393
}
6758
 
-
 
6759
	for (i = 0; i < 256; i++) {
-
 
6760
		I915_WRITE(palreg + 4 * i,
-
 
6761
			   (intel_crtc->lut_r[i] << 16) |
-
 
6762
			   (intel_crtc->lut_g[i] << 8) |
-
 
6763
			   intel_crtc->lut_b[i]);
-
 
6764
	}
-
 
6765
 
-
 
6766
	if (reenable_ips)
-
 
6767
		hsw_enable_ips(intel_crtc);
-
 
6768
}
-
 
6769
 
-
 
6770
#if 0
7394
 
6771
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
7395
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
6772
{
7396
{
6773
	struct drm_device *dev = crtc->dev;
7397
	struct drm_device *dev = crtc->dev;
6774
	struct drm_i915_private *dev_priv = dev->dev_private;
7398
	struct drm_i915_private *dev_priv = dev->dev_private;
6775
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7399
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6776
	bool visible = base != 0;
7400
	bool visible = base != 0;
6777
	u32 cntl;
7401
	u32 cntl;
6778
 
7402
 
6779
	if (intel_crtc->cursor_visible == visible)
7403
	if (intel_crtc->cursor_visible == visible)
6780
		return;
7404
		return;
6781
 
7405
 
6782
	cntl = I915_READ(_CURACNTR);
7406
	cntl = I915_READ(_CURACNTR);
6783
	if (visible) {
7407
	if (visible) {
6784
		/* On these chipsets we can only modify the base whilst
7408
		/* On these chipsets we can only modify the base whilst
6785
		 * the cursor is disabled.
7409
		 * the cursor is disabled.
6786
		 */
7410
		 */
6787
		I915_WRITE(_CURABASE, base);
7411
		I915_WRITE(_CURABASE, base);
6788
 
7412
 
6789
		cntl &= ~(CURSOR_FORMAT_MASK);
7413
		cntl &= ~(CURSOR_FORMAT_MASK);
6790
		/* XXX width must be 64, stride 256 => 0x00 << 28 */
7414
		/* XXX width must be 64, stride 256 => 0x00 << 28 */
6791
		cntl |= CURSOR_ENABLE |
7415
		cntl |= CURSOR_ENABLE |
6792
			CURSOR_GAMMA_ENABLE |
7416
			CURSOR_GAMMA_ENABLE |
6793
			CURSOR_FORMAT_ARGB;
7417
			CURSOR_FORMAT_ARGB;
6794
	} else
7418
	} else
6795
		cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
7419
		cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
6796
	I915_WRITE(_CURACNTR, cntl);
7420
	I915_WRITE(_CURACNTR, cntl);
6797
 
7421
 
6798
	intel_crtc->cursor_visible = visible;
7422
	intel_crtc->cursor_visible = visible;
6799
}
7423
}
6800
#endif
-
 
6801
 
7424
 
6802
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
7425
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
6803
{
7426
{
6804
	struct drm_device *dev = crtc->dev;
7427
	struct drm_device *dev = crtc->dev;
6805
	struct drm_i915_private *dev_priv = dev->dev_private;
7428
	struct drm_i915_private *dev_priv = dev->dev_private;
6806
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7429
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6807
	int pipe = intel_crtc->pipe;
7430
	int pipe = intel_crtc->pipe;
6808
	bool visible = base != 0;
7431
	bool visible = base != 0;
6809
 
7432
 
6810
	if (intel_crtc->cursor_visible != visible) {
7433
	if (intel_crtc->cursor_visible != visible) {
6811
		uint32_t cntl = I915_READ(CURCNTR(pipe));
7434
		uint32_t cntl = I915_READ(CURCNTR(pipe));
6812
		if (base) {
7435
		if (base) {
6813
			cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
7436
			cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
6814
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
7437
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
6815
			cntl |= pipe << 28; /* Connect to correct pipe */
7438
			cntl |= pipe << 28; /* Connect to correct pipe */
6816
		} else {
7439
		} else {
6817
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
7440
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
6818
			cntl |= CURSOR_MODE_DISABLE;
7441
			cntl |= CURSOR_MODE_DISABLE;
6819
		}
7442
		}
6820
		I915_WRITE(CURCNTR(pipe), cntl);
7443
		I915_WRITE(CURCNTR(pipe), cntl);
6821
 
7444
 
6822
		intel_crtc->cursor_visible = visible;
7445
		intel_crtc->cursor_visible = visible;
6823
	}
7446
	}
6824
	/* and commit changes on next vblank */
7447
	/* and commit changes on next vblank */
6825
	POSTING_READ(CURCNTR(pipe));
7448
	POSTING_READ(CURCNTR(pipe));
6826
	I915_WRITE(CURBASE(pipe), base);
7449
	I915_WRITE(CURBASE(pipe), base);
6827
	POSTING_READ(CURBASE(pipe));
7450
	POSTING_READ(CURBASE(pipe));
6828
}
7451
}
6829
 
7452
 
6830
static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
7453
static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
6831
{
7454
{
6832
	struct drm_device *dev = crtc->dev;
7455
	struct drm_device *dev = crtc->dev;
6833
	struct drm_i915_private *dev_priv = dev->dev_private;
7456
	struct drm_i915_private *dev_priv = dev->dev_private;
6834
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7457
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6835
	int pipe = intel_crtc->pipe;
7458
	int pipe = intel_crtc->pipe;
6836
	bool visible = base != 0;
7459
	bool visible = base != 0;
6837
 
7460
 
6838
	if (intel_crtc->cursor_visible != visible) {
7461
	if (intel_crtc->cursor_visible != visible) {
6839
		uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
7462
		uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
6840
		if (base) {
7463
		if (base) {
6841
			cntl &= ~CURSOR_MODE;
7464
			cntl &= ~CURSOR_MODE;
6842
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
7465
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
6843
		} else {
7466
		} else {
6844
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
7467
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
6845
			cntl |= CURSOR_MODE_DISABLE;
7468
			cntl |= CURSOR_MODE_DISABLE;
6846
		}
7469
		}
6847
		if (IS_HASWELL(dev)) {
7470
		if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
6848
			cntl |= CURSOR_PIPE_CSC_ENABLE;
7471
			cntl |= CURSOR_PIPE_CSC_ENABLE;
6849
			cntl &= ~CURSOR_TRICKLE_FEED_DISABLE;
7472
			cntl &= ~CURSOR_TRICKLE_FEED_DISABLE;
6850
		}
7473
		}
6851
		I915_WRITE(CURCNTR_IVB(pipe), cntl);
7474
		I915_WRITE(CURCNTR_IVB(pipe), cntl);
6852
 
7475
 
6853
		intel_crtc->cursor_visible = visible;
7476
		intel_crtc->cursor_visible = visible;
6854
	}
7477
	}
6855
	/* and commit changes on next vblank */
7478
	/* and commit changes on next vblank */
6856
	POSTING_READ(CURCNTR_IVB(pipe));
7479
	POSTING_READ(CURCNTR_IVB(pipe));
6857
	I915_WRITE(CURBASE_IVB(pipe), base);
7480
	I915_WRITE(CURBASE_IVB(pipe), base);
6858
	POSTING_READ(CURBASE_IVB(pipe));
7481
	POSTING_READ(CURBASE_IVB(pipe));
6859
}
7482
}
6860
 
7483
 
6861
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
7484
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
6862
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
7485
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
6863
				     bool on)
7486
				     bool on)
6864
{
7487
{
6865
	struct drm_device *dev = crtc->dev;
7488
	struct drm_device *dev = crtc->dev;
6866
	struct drm_i915_private *dev_priv = dev->dev_private;
7489
	struct drm_i915_private *dev_priv = dev->dev_private;
6867
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7490
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6868
	int pipe = intel_crtc->pipe;
7491
	int pipe = intel_crtc->pipe;
6869
	int x = intel_crtc->cursor_x;
7492
	int x = intel_crtc->cursor_x;
6870
	int y = intel_crtc->cursor_y;
7493
	int y = intel_crtc->cursor_y;
6871
	u32 base, pos;
7494
	u32 base = 0, pos = 0;
6872
	bool visible;
7495
	bool visible;
6873
 
7496
 
6874
	pos = 0;
-
 
6875
 
-
 
6876
	if (on && crtc->enabled && crtc->fb) {
7497
	if (on)
6877
		base = intel_crtc->cursor_addr;
-
 
6878
		if (x > (int) crtc->fb->width)
-
 
6879
			base = 0;
7498
		base = intel_crtc->cursor_addr;
6880
 
7499
 
6881
		if (y > (int) crtc->fb->height)
7500
	if (x >= intel_crtc->config.pipe_src_w)
-
 
7501
			base = 0;
6882
			base = 0;
7502
 
6883
	} else
7503
	if (y >= intel_crtc->config.pipe_src_h)
6884
		base = 0;
7504
		base = 0;
6885
 
7505
 
6886
	if (x < 0) {
7506
	if (x < 0) {
6887
		if (x + intel_crtc->cursor_width < 0)
7507
		if (x + intel_crtc->cursor_width <= 0)
6888
			base = 0;
7508
			base = 0;
6889
 
7509
 
6890
		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
7510
		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
6891
		x = -x;
7511
		x = -x;
6892
	}
7512
	}
6893
	pos |= x << CURSOR_X_SHIFT;
7513
	pos |= x << CURSOR_X_SHIFT;
6894
 
7514
 
6895
	if (y < 0) {
7515
	if (y < 0) {
6896
		if (y + intel_crtc->cursor_height < 0)
7516
		if (y + intel_crtc->cursor_height <= 0)
6897
			base = 0;
7517
			base = 0;
6898
 
7518
 
6899
		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
7519
		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
6900
		y = -y;
7520
		y = -y;
6901
	}
7521
	}
6902
	pos |= y << CURSOR_Y_SHIFT;
7522
	pos |= y << CURSOR_Y_SHIFT;
6903
 
7523
 
6904
	visible = base != 0;
7524
	visible = base != 0;
6905
	if (!visible && !intel_crtc->cursor_visible)
7525
	if (!visible && !intel_crtc->cursor_visible)
6906
		return;
7526
		return;
6907
 
7527
 
6908
	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
7528
	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev)) {
6909
		I915_WRITE(CURPOS_IVB(pipe), pos);
7529
		I915_WRITE(CURPOS_IVB(pipe), pos);
6910
		ivb_update_cursor(crtc, base);
7530
		ivb_update_cursor(crtc, base);
6911
	} else {
7531
	} else {
6912
		I915_WRITE(CURPOS(pipe), pos);
7532
		I915_WRITE(CURPOS(pipe), pos);
6913
			i9xx_update_cursor(crtc, base);
7533
		i9xx_update_cursor(crtc, base);
6914
	}
7534
	}
6915
}
7535
}
6916
 
7536
 
6917
#if 0
7537
#if 0
6918
static int intel_crtc_cursor_set(struct drm_crtc *crtc,
7538
static int intel_crtc_cursor_set(struct drm_crtc *crtc,
6919
				 struct drm_file *file,
7539
				 struct drm_file *file,
6920
				 uint32_t handle,
7540
				 uint32_t handle,
6921
				 uint32_t width, uint32_t height)
7541
				 uint32_t width, uint32_t height)
6922
{
7542
{
6923
	struct drm_device *dev = crtc->dev;
7543
	struct drm_device *dev = crtc->dev;
6924
	struct drm_i915_private *dev_priv = dev->dev_private;
7544
	struct drm_i915_private *dev_priv = dev->dev_private;
6925
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7545
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6926
	struct drm_i915_gem_object *obj;
7546
	struct drm_i915_gem_object *obj;
6927
	uint32_t addr;
7547
	uint32_t addr;
6928
	int ret;
7548
	int ret;
6929
 
7549
 
6930
	/* if we want to turn off the cursor ignore width and height */
7550
	/* if we want to turn off the cursor ignore width and height */
6931
	if (!handle) {
7551
	if (!handle) {
6932
		DRM_DEBUG_KMS("cursor off\n");
7552
		DRM_DEBUG_KMS("cursor off\n");
6933
		addr = 0;
7553
		addr = 0;
6934
		obj = NULL;
7554
		obj = NULL;
6935
		mutex_lock(&dev->struct_mutex);
7555
		mutex_lock(&dev->struct_mutex);
6936
		goto finish;
7556
		goto finish;
6937
	}
7557
	}
6938
 
7558
 
6939
	/* Currently we only support 64x64 cursors */
7559
	/* Currently we only support 64x64 cursors */
6940
	if (width != 64 || height != 64) {
7560
	if (width != 64 || height != 64) {
6941
		DRM_ERROR("we currently only support 64x64 cursors\n");
7561
		DRM_ERROR("we currently only support 64x64 cursors\n");
6942
		return -EINVAL;
7562
		return -EINVAL;
6943
	}
7563
	}
6944
 
7564
 
6945
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
7565
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
6946
	if (&obj->base == NULL)
7566
	if (&obj->base == NULL)
6947
		return -ENOENT;
7567
		return -ENOENT;
6948
 
7568
 
6949
	if (obj->base.size < width * height * 4) {
7569
	if (obj->base.size < width * height * 4) {
6950
		DRM_ERROR("buffer is to small\n");
7570
		DRM_ERROR("buffer is to small\n");
6951
		ret = -ENOMEM;
7571
		ret = -ENOMEM;
6952
		goto fail;
7572
		goto fail;
6953
	}
7573
	}
6954
 
7574
 
6955
	/* we only need to pin inside GTT if cursor is non-phy */
7575
	/* we only need to pin inside GTT if cursor is non-phy */
6956
	mutex_lock(&dev->struct_mutex);
7576
	mutex_lock(&dev->struct_mutex);
6957
	if (!dev_priv->info->cursor_needs_physical) {
7577
	if (!dev_priv->info->cursor_needs_physical) {
6958
		unsigned alignment;
7578
		unsigned alignment;
6959
 
7579
 
6960
		if (obj->tiling_mode) {
7580
		if (obj->tiling_mode) {
6961
			DRM_ERROR("cursor cannot be tiled\n");
7581
			DRM_ERROR("cursor cannot be tiled\n");
6962
			ret = -EINVAL;
7582
			ret = -EINVAL;
6963
			goto fail_locked;
7583
			goto fail_locked;
6964
		}
7584
		}
6965
 
7585
 
6966
		/* Note that the w/a also requires 2 PTE of padding following
7586
		/* Note that the w/a also requires 2 PTE of padding following
6967
		 * the bo. We currently fill all unused PTE with the shadow
7587
		 * the bo. We currently fill all unused PTE with the shadow
6968
		 * page and so we should always have valid PTE following the
7588
		 * page and so we should always have valid PTE following the
6969
		 * cursor preventing the VT-d warning.
7589
		 * cursor preventing the VT-d warning.
6970
		 */
7590
		 */
6971
		alignment = 0;
7591
		alignment = 0;
6972
		if (need_vtd_wa(dev))
7592
		if (need_vtd_wa(dev))
6973
			alignment = 64*1024;
7593
			alignment = 64*1024;
6974
 
7594
 
6975
		ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
7595
		ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
6976
		if (ret) {
7596
		if (ret) {
6977
			DRM_ERROR("failed to move cursor bo into the GTT\n");
7597
			DRM_ERROR("failed to move cursor bo into the GTT\n");
6978
			goto fail_locked;
7598
			goto fail_locked;
6979
		}
7599
		}
6980
 
7600
 
6981
		ret = i915_gem_object_put_fence(obj);
7601
		ret = i915_gem_object_put_fence(obj);
6982
		if (ret) {
7602
		if (ret) {
6983
			DRM_ERROR("failed to release fence for cursor");
7603
			DRM_ERROR("failed to release fence for cursor");
6984
			goto fail_unpin;
7604
			goto fail_unpin;
6985
		}
7605
		}
6986
 
7606
 
6987
		addr = i915_gem_obj_ggtt_offset(obj);
7607
		addr = i915_gem_obj_ggtt_offset(obj);
6988
	} else {
7608
	} else {
6989
		int align = IS_I830(dev) ? 16 * 1024 : 256;
7609
		int align = IS_I830(dev) ? 16 * 1024 : 256;
6990
		ret = i915_gem_attach_phys_object(dev, obj,
7610
		ret = i915_gem_attach_phys_object(dev, obj,
6991
						  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
7611
						  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
6992
						  align);
7612
						  align);
6993
		if (ret) {
7613
		if (ret) {
6994
			DRM_ERROR("failed to attach phys object\n");
7614
			DRM_ERROR("failed to attach phys object\n");
6995
			goto fail_locked;
7615
			goto fail_locked;
6996
		}
7616
		}
6997
		addr = obj->phys_obj->handle->busaddr;
7617
		addr = obj->phys_obj->handle->busaddr;
6998
	}
7618
	}
6999
 
7619
 
7000
	if (IS_GEN2(dev))
7620
	if (IS_GEN2(dev))
7001
		I915_WRITE(CURSIZE, (height << 12) | width);
7621
		I915_WRITE(CURSIZE, (height << 12) | width);
7002
 
7622
 
7003
 finish:
7623
 finish:
7004
	if (intel_crtc->cursor_bo) {
7624
	if (intel_crtc->cursor_bo) {
7005
		if (dev_priv->info->cursor_needs_physical) {
7625
		if (dev_priv->info->cursor_needs_physical) {
7006
			if (intel_crtc->cursor_bo != obj)
7626
			if (intel_crtc->cursor_bo != obj)
7007
				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
7627
				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
7008
		} else
7628
		} else
7009
			i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
7629
			i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
7010
		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
7630
		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
7011
	}
7631
	}
7012
 
7632
 
7013
	mutex_unlock(&dev->struct_mutex);
7633
	mutex_unlock(&dev->struct_mutex);
7014
 
7634
 
7015
	intel_crtc->cursor_addr = addr;
7635
	intel_crtc->cursor_addr = addr;
7016
	intel_crtc->cursor_bo = obj;
7636
	intel_crtc->cursor_bo = obj;
7017
	intel_crtc->cursor_width = width;
7637
	intel_crtc->cursor_width = width;
7018
	intel_crtc->cursor_height = height;
7638
	intel_crtc->cursor_height = height;
7019
 
7639
 
7020
	if (intel_crtc->active)
7640
	if (intel_crtc->active)
7021
		intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
7641
		intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
7022
 
7642
 
7023
	return 0;
7643
	return 0;
7024
fail_unpin:
7644
fail_unpin:
7025
	i915_gem_object_unpin_from_display_plane(obj);
7645
	i915_gem_object_unpin_from_display_plane(obj);
7026
fail_locked:
7646
fail_locked:
7027
	mutex_unlock(&dev->struct_mutex);
7647
	mutex_unlock(&dev->struct_mutex);
7028
fail:
7648
fail:
7029
	drm_gem_object_unreference_unlocked(&obj->base);
7649
	drm_gem_object_unreference_unlocked(&obj->base);
7030
	return ret;
7650
	return ret;
7031
}
7651
}
7032
#endif
7652
#endif
7033
 
7653
 
7034
static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
7654
static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
7035
{
7655
{
7036
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7656
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7037
 
7657
 
7038
	intel_crtc->cursor_x = x;
7658
	intel_crtc->cursor_x = clamp_t(int, x, SHRT_MIN, SHRT_MAX);
7039
	intel_crtc->cursor_y = y;
7659
	intel_crtc->cursor_y = clamp_t(int, y, SHRT_MIN, SHRT_MAX);
7040
 
7660
 
7041
	if (intel_crtc->active)
7661
	if (intel_crtc->active)
7042
		intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
7662
		intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
7043
 
7663
 
7044
	return 0;
7664
	return 0;
7045
}
7665
}
7046
 
-
 
7047
/** Sets the color ramps on behalf of RandR */
-
 
7048
void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
-
 
7049
				 u16 blue, int regno)
-
 
7050
{
-
 
7051
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
 
7052
 
-
 
7053
	intel_crtc->lut_r[regno] = red >> 8;
-
 
7054
	intel_crtc->lut_g[regno] = green >> 8;
-
 
7055
	intel_crtc->lut_b[regno] = blue >> 8;
-
 
7056
}
-
 
7057
 
-
 
7058
void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
-
 
7059
			     u16 *blue, int regno)
-
 
7060
{
-
 
7061
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
 
7062
 
-
 
7063
	*red = intel_crtc->lut_r[regno] << 8;
-
 
7064
	*green = intel_crtc->lut_g[regno] << 8;
-
 
7065
	*blue = intel_crtc->lut_b[regno] << 8;
-
 
7066
}
-
 
7067
 
7666
 
7068
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
7667
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
7069
				 u16 *blue, uint32_t start, uint32_t size)
7668
				 u16 *blue, uint32_t start, uint32_t size)
7070
{
7669
{
7071
	int end = (start + size > 256) ? 256 : start + size, i;
7670
	int end = (start + size > 256) ? 256 : start + size, i;
7072
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7671
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7073
 
7672
 
7074
	for (i = start; i < end; i++) {
7673
	for (i = start; i < end; i++) {
7075
		intel_crtc->lut_r[i] = red[i] >> 8;
7674
		intel_crtc->lut_r[i] = red[i] >> 8;
7076
		intel_crtc->lut_g[i] = green[i] >> 8;
7675
		intel_crtc->lut_g[i] = green[i] >> 8;
7077
		intel_crtc->lut_b[i] = blue[i] >> 8;
7676
		intel_crtc->lut_b[i] = blue[i] >> 8;
7078
	}
7677
	}
7079
 
7678
 
7080
	intel_crtc_load_lut(crtc);
7679
	intel_crtc_load_lut(crtc);
7081
}
7680
}
7082
 
7681
 
7083
/* VESA 640x480x72Hz mode to set on the pipe */
7682
/* VESA 640x480x72Hz mode to set on the pipe */
7084
static struct drm_display_mode load_detect_mode = {
7683
static struct drm_display_mode load_detect_mode = {
7085
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
7684
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
7086
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
7685
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
7087
};
7686
};
7088
 
7687
 
7089
static struct drm_framebuffer *
7688
struct drm_framebuffer *
7090
intel_framebuffer_create(struct drm_device *dev,
7689
intel_framebuffer_create(struct drm_device *dev,
7091
			 struct drm_mode_fb_cmd2 *mode_cmd,
7690
			 struct drm_mode_fb_cmd2 *mode_cmd,
7092
			 struct drm_i915_gem_object *obj)
7691
			 struct drm_i915_gem_object *obj)
7093
{
7692
{
7094
	struct intel_framebuffer *intel_fb;
7693
	struct intel_framebuffer *intel_fb;
7095
	int ret;
7694
	int ret;
7096
 
7695
 
7097
	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
7696
	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
7098
	if (!intel_fb) {
7697
	if (!intel_fb) {
7099
		drm_gem_object_unreference_unlocked(&obj->base);
7698
		drm_gem_object_unreference_unlocked(&obj->base);
7100
		return ERR_PTR(-ENOMEM);
7699
		return ERR_PTR(-ENOMEM);
7101
	}
7700
	}
-
 
7701
 
-
 
7702
	ret = i915_mutex_lock_interruptible(dev);
-
 
7703
	if (ret)
-
 
7704
		goto err;
7102
 
7705
 
-
 
7706
	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
7103
	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
7707
	mutex_unlock(&dev->struct_mutex);
-
 
7708
	if (ret)
-
 
7709
		goto err;
-
 
7710
 
-
 
7711
	return &intel_fb->base;
7104
	if (ret) {
7712
err:
7105
		drm_gem_object_unreference_unlocked(&obj->base);
7713
		drm_gem_object_unreference_unlocked(&obj->base);
7106
		kfree(intel_fb);
-
 
7107
		return ERR_PTR(ret);
-
 
7108
	}
7714
		kfree(intel_fb);
7109
 
7715
 
7110
	return &intel_fb->base;
7716
		return ERR_PTR(ret);
7111
}
7717
}
7112
 
7718
 
7113
static u32
7719
static u32
7114
intel_framebuffer_pitch_for_width(int width, int bpp)
7720
intel_framebuffer_pitch_for_width(int width, int bpp)
7115
{
7721
{
7116
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
7722
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
7117
	return ALIGN(pitch, 64);
7723
	return ALIGN(pitch, 64);
7118
}
7724
}
7119
 
7725
 
7120
static u32
7726
static u32
7121
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
7727
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
7122
{
7728
{
7123
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
7729
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
7124
	return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
7730
	return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
7125
}
7731
}
7126
 
7732
 
7127
static struct drm_framebuffer *
7733
static struct drm_framebuffer *
7128
intel_framebuffer_create_for_mode(struct drm_device *dev,
7734
intel_framebuffer_create_for_mode(struct drm_device *dev,
7129
				  struct drm_display_mode *mode,
7735
				  struct drm_display_mode *mode,
7130
				  int depth, int bpp)
7736
				  int depth, int bpp)
7131
{
7737
{
7132
	struct drm_i915_gem_object *obj;
7738
	struct drm_i915_gem_object *obj;
7133
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
7739
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
7134
 
7740
 
7135
	return NULL;
7741
	return NULL;
7136
}
7742
}
7137
 
7743
 
7138
static struct drm_framebuffer *
7744
static struct drm_framebuffer *
7139
mode_fits_in_fbdev(struct drm_device *dev,
7745
mode_fits_in_fbdev(struct drm_device *dev,
7140
		   struct drm_display_mode *mode)
7746
		   struct drm_display_mode *mode)
7141
{
7747
{
-
 
7748
#ifdef CONFIG_DRM_I915_FBDEV
7142
	struct drm_i915_private *dev_priv = dev->dev_private;
7749
	struct drm_i915_private *dev_priv = dev->dev_private;
7143
	struct drm_i915_gem_object *obj;
7750
	struct drm_i915_gem_object *obj;
7144
	struct drm_framebuffer *fb;
7751
	struct drm_framebuffer *fb;
7145
 
7752
 
7146
	if (dev_priv->fbdev == NULL)
7753
	if (dev_priv->fbdev == NULL)
7147
		return NULL;
7754
		return NULL;
7148
 
7755
 
7149
	obj = dev_priv->fbdev->ifb.obj;
7756
	obj = dev_priv->fbdev->ifb.obj;
7150
	if (obj == NULL)
7757
	if (obj == NULL)
7151
		return NULL;
7758
		return NULL;
7152
 
7759
 
7153
	fb = &dev_priv->fbdev->ifb.base;
7760
	fb = &dev_priv->fbdev->ifb.base;
7154
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
7761
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
7155
							       fb->bits_per_pixel))
7762
							       fb->bits_per_pixel))
7156
		return NULL;
7763
		return NULL;
7157
 
7764
 
7158
	if (obj->base.size < mode->vdisplay * fb->pitches[0])
7765
	if (obj->base.size < mode->vdisplay * fb->pitches[0])
7159
		return NULL;
7766
		return NULL;
7160
 
7767
 
7161
	return fb;
7768
	return fb;
-
 
7769
#else
-
 
7770
	return NULL;
-
 
7771
#endif
7162
}
7772
}
7163
 
7773
 
7164
bool intel_get_load_detect_pipe(struct drm_connector *connector,
7774
bool intel_get_load_detect_pipe(struct drm_connector *connector,
7165
				struct drm_display_mode *mode,
7775
				struct drm_display_mode *mode,
7166
				struct intel_load_detect_pipe *old)
7776
				struct intel_load_detect_pipe *old)
7167
{
7777
{
7168
	struct intel_crtc *intel_crtc;
7778
	struct intel_crtc *intel_crtc;
7169
	struct intel_encoder *intel_encoder =
7779
	struct intel_encoder *intel_encoder =
7170
		intel_attached_encoder(connector);
7780
		intel_attached_encoder(connector);
7171
	struct drm_crtc *possible_crtc;
7781
	struct drm_crtc *possible_crtc;
7172
	struct drm_encoder *encoder = &intel_encoder->base;
7782
	struct drm_encoder *encoder = &intel_encoder->base;
7173
	struct drm_crtc *crtc = NULL;
7783
	struct drm_crtc *crtc = NULL;
7174
	struct drm_device *dev = encoder->dev;
7784
	struct drm_device *dev = encoder->dev;
7175
	struct drm_framebuffer *fb;
7785
	struct drm_framebuffer *fb;
7176
	int i = -1;
7786
	int i = -1;
7177
 
7787
 
7178
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
7788
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
7179
		      connector->base.id, drm_get_connector_name(connector),
7789
		      connector->base.id, drm_get_connector_name(connector),
7180
		      encoder->base.id, drm_get_encoder_name(encoder));
7790
		      encoder->base.id, drm_get_encoder_name(encoder));
7181
 
7791
 
7182
	/*
7792
	/*
7183
	 * Algorithm gets a little messy:
7793
	 * Algorithm gets a little messy:
7184
	 *
7794
	 *
7185
	 *   - if the connector already has an assigned crtc, use it (but make
7795
	 *   - if the connector already has an assigned crtc, use it (but make
7186
	 *     sure it's on first)
7796
	 *     sure it's on first)
7187
	 *
7797
	 *
7188
	 *   - try to find the first unused crtc that can drive this connector,
7798
	 *   - try to find the first unused crtc that can drive this connector,
7189
	 *     and use that if we find one
7799
	 *     and use that if we find one
7190
	 */
7800
	 */
7191
 
7801
 
7192
	/* See if we already have a CRTC for this connector */
7802
	/* See if we already have a CRTC for this connector */
7193
	if (encoder->crtc) {
7803
	if (encoder->crtc) {
7194
		crtc = encoder->crtc;
7804
		crtc = encoder->crtc;
7195
 
7805
 
7196
		mutex_lock(&crtc->mutex);
7806
		mutex_lock(&crtc->mutex);
7197
 
7807
 
7198
		old->dpms_mode = connector->dpms;
7808
		old->dpms_mode = connector->dpms;
7199
		old->load_detect_temp = false;
7809
		old->load_detect_temp = false;
7200
 
7810
 
7201
		/* Make sure the crtc and connector are running */
7811
		/* Make sure the crtc and connector are running */
7202
		if (connector->dpms != DRM_MODE_DPMS_ON)
7812
		if (connector->dpms != DRM_MODE_DPMS_ON)
7203
			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
7813
			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
7204
 
7814
 
7205
		return true;
7815
		return true;
7206
	}
7816
	}
7207
 
7817
 
7208
	/* Find an unused one (if possible) */
7818
	/* Find an unused one (if possible) */
7209
	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
7819
	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
7210
		i++;
7820
		i++;
7211
		if (!(encoder->possible_crtcs & (1 << i)))
7821
		if (!(encoder->possible_crtcs & (1 << i)))
7212
			continue;
7822
			continue;
7213
		if (!possible_crtc->enabled) {
7823
		if (!possible_crtc->enabled) {
7214
			crtc = possible_crtc;
7824
			crtc = possible_crtc;
7215
			break;
7825
			break;
7216
		}
7826
		}
7217
	}
7827
	}
7218
 
7828
 
7219
	/*
7829
	/*
7220
	 * If we didn't find an unused CRTC, don't use any.
7830
	 * If we didn't find an unused CRTC, don't use any.
7221
	 */
7831
	 */
7222
	if (!crtc) {
7832
	if (!crtc) {
7223
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
7833
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
7224
		return false;
7834
		return false;
7225
	}
7835
	}
7226
 
7836
 
7227
	mutex_lock(&crtc->mutex);
7837
	mutex_lock(&crtc->mutex);
7228
	intel_encoder->new_crtc = to_intel_crtc(crtc);
7838
	intel_encoder->new_crtc = to_intel_crtc(crtc);
7229
	to_intel_connector(connector)->new_encoder = intel_encoder;
7839
	to_intel_connector(connector)->new_encoder = intel_encoder;
7230
 
7840
 
7231
	intel_crtc = to_intel_crtc(crtc);
7841
	intel_crtc = to_intel_crtc(crtc);
7232
	old->dpms_mode = connector->dpms;
7842
	old->dpms_mode = connector->dpms;
7233
	old->load_detect_temp = true;
7843
	old->load_detect_temp = true;
7234
	old->release_fb = NULL;
7844
	old->release_fb = NULL;
7235
 
7845
 
7236
	if (!mode)
7846
	if (!mode)
7237
		mode = &load_detect_mode;
7847
		mode = &load_detect_mode;
7238
 
7848
 
7239
	/* We need a framebuffer large enough to accommodate all accesses
7849
	/* We need a framebuffer large enough to accommodate all accesses
7240
	 * that the plane may generate whilst we perform load detection.
7850
	 * that the plane may generate whilst we perform load detection.
7241
	 * We can not rely on the fbcon either being present (we get called
7851
	 * We can not rely on the fbcon either being present (we get called
7242
	 * during its initialisation to detect all boot displays, or it may
7852
	 * during its initialisation to detect all boot displays, or it may
7243
	 * not even exist) or that it is large enough to satisfy the
7853
	 * not even exist) or that it is large enough to satisfy the
7244
	 * requested mode.
7854
	 * requested mode.
7245
	 */
7855
	 */
7246
	fb = mode_fits_in_fbdev(dev, mode);
7856
	fb = mode_fits_in_fbdev(dev, mode);
7247
	if (fb == NULL) {
7857
	if (fb == NULL) {
7248
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
7858
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
7249
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
7859
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
7250
		old->release_fb = fb;
7860
		old->release_fb = fb;
7251
	} else
7861
	} else
7252
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
7862
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
7253
	if (IS_ERR(fb)) {
7863
	if (IS_ERR(fb)) {
7254
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
7864
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
7255
		mutex_unlock(&crtc->mutex);
7865
		mutex_unlock(&crtc->mutex);
7256
		return false;
7866
		return false;
7257
	}
7867
	}
7258
 
7868
 
7259
	if (intel_set_mode(crtc, mode, 0, 0, fb)) {
7869
	if (intel_set_mode(crtc, mode, 0, 0, fb)) {
7260
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
7870
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
7261
		if (old->release_fb)
7871
		if (old->release_fb)
7262
			old->release_fb->funcs->destroy(old->release_fb);
7872
			old->release_fb->funcs->destroy(old->release_fb);
7263
		mutex_unlock(&crtc->mutex);
7873
		mutex_unlock(&crtc->mutex);
7264
		return false;
7874
		return false;
7265
	}
7875
	}
7266
 
7876
 
7267
	/* let the connector get through one full cycle before testing */
7877
	/* let the connector get through one full cycle before testing */
7268
	intel_wait_for_vblank(dev, intel_crtc->pipe);
7878
	intel_wait_for_vblank(dev, intel_crtc->pipe);
7269
	return true;
7879
	return true;
7270
}
7880
}
7271
 
7881
 
7272
void intel_release_load_detect_pipe(struct drm_connector *connector,
7882
void intel_release_load_detect_pipe(struct drm_connector *connector,
7273
				    struct intel_load_detect_pipe *old)
7883
				    struct intel_load_detect_pipe *old)
7274
{
7884
{
7275
	struct intel_encoder *intel_encoder =
7885
	struct intel_encoder *intel_encoder =
7276
		intel_attached_encoder(connector);
7886
		intel_attached_encoder(connector);
7277
	struct drm_encoder *encoder = &intel_encoder->base;
7887
	struct drm_encoder *encoder = &intel_encoder->base;
7278
	struct drm_crtc *crtc = encoder->crtc;
7888
	struct drm_crtc *crtc = encoder->crtc;
7279
 
7889
 
7280
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
7890
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
7281
		      connector->base.id, drm_get_connector_name(connector),
7891
		      connector->base.id, drm_get_connector_name(connector),
7282
		      encoder->base.id, drm_get_encoder_name(encoder));
7892
		      encoder->base.id, drm_get_encoder_name(encoder));
7283
 
7893
 
7284
	if (old->load_detect_temp) {
7894
	if (old->load_detect_temp) {
7285
		to_intel_connector(connector)->new_encoder = NULL;
7895
		to_intel_connector(connector)->new_encoder = NULL;
7286
		intel_encoder->new_crtc = NULL;
7896
		intel_encoder->new_crtc = NULL;
7287
		intel_set_mode(crtc, NULL, 0, 0, NULL);
7897
		intel_set_mode(crtc, NULL, 0, 0, NULL);
7288
 
7898
 
7289
		if (old->release_fb) {
7899
		if (old->release_fb) {
7290
			drm_framebuffer_unregister_private(old->release_fb);
7900
			drm_framebuffer_unregister_private(old->release_fb);
7291
			drm_framebuffer_unreference(old->release_fb);
7901
			drm_framebuffer_unreference(old->release_fb);
7292
		}
7902
		}
7293
 
7903
 
7294
		mutex_unlock(&crtc->mutex);
7904
		mutex_unlock(&crtc->mutex);
7295
		return;
7905
		return;
7296
	}
7906
	}
7297
 
7907
 
7298
	/* Switch crtc and encoder back off if necessary */
7908
	/* Switch crtc and encoder back off if necessary */
7299
	if (old->dpms_mode != DRM_MODE_DPMS_ON)
7909
	if (old->dpms_mode != DRM_MODE_DPMS_ON)
7300
		connector->funcs->dpms(connector, old->dpms_mode);
7910
		connector->funcs->dpms(connector, old->dpms_mode);
7301
 
7911
 
7302
	mutex_unlock(&crtc->mutex);
7912
	mutex_unlock(&crtc->mutex);
7303
}
7913
}
-
 
7914
 
-
 
7915
static int i9xx_pll_refclk(struct drm_device *dev,
-
 
7916
			   const struct intel_crtc_config *pipe_config)
-
 
7917
{
-
 
7918
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
7919
	u32 dpll = pipe_config->dpll_hw_state.dpll;
-
 
7920
 
-
 
7921
	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
-
 
7922
		return dev_priv->vbt.lvds_ssc_freq;
-
 
7923
	else if (HAS_PCH_SPLIT(dev))
-
 
7924
		return 120000;
-
 
7925
	else if (!IS_GEN2(dev))
-
 
7926
		return 96000;
-
 
7927
	else
-
 
7928
		return 48000;
-
 
7929
}
7304
 
7930
 
7305
/* Returns the clock of the currently programmed mode of the given pipe. */
7931
/* Returns the clock of the currently programmed mode of the given pipe. */
7306
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
7932
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
7307
				struct intel_crtc_config *pipe_config)
7933
				struct intel_crtc_config *pipe_config)
7308
{
7934
{
7309
	struct drm_device *dev = crtc->base.dev;
7935
	struct drm_device *dev = crtc->base.dev;
7310
	struct drm_i915_private *dev_priv = dev->dev_private;
7936
	struct drm_i915_private *dev_priv = dev->dev_private;
7311
	int pipe = pipe_config->cpu_transcoder;
7937
	int pipe = pipe_config->cpu_transcoder;
7312
	u32 dpll = I915_READ(DPLL(pipe));
7938
	u32 dpll = pipe_config->dpll_hw_state.dpll;
7313
	u32 fp;
7939
	u32 fp;
7314
	intel_clock_t clock;
7940
	intel_clock_t clock;
-
 
7941
	int refclk = i9xx_pll_refclk(dev, pipe_config);
7315
 
7942
 
7316
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
7943
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
7317
		fp = I915_READ(FP0(pipe));
7944
		fp = pipe_config->dpll_hw_state.fp0;
7318
	else
7945
	else
7319
		fp = I915_READ(FP1(pipe));
7946
		fp = pipe_config->dpll_hw_state.fp1;
7320
 
7947
 
7321
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
7948
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
7322
	if (IS_PINEVIEW(dev)) {
7949
	if (IS_PINEVIEW(dev)) {
7323
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
7950
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
7324
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
7951
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
7325
	} else {
7952
	} else {
7326
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
7953
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
7327
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
7954
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
7328
	}
7955
	}
7329
 
7956
 
7330
	if (!IS_GEN2(dev)) {
7957
	if (!IS_GEN2(dev)) {
7331
		if (IS_PINEVIEW(dev))
7958
		if (IS_PINEVIEW(dev))
7332
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
7959
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
7333
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
7960
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
7334
		else
7961
		else
7335
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
7962
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
7336
			       DPLL_FPA01_P1_POST_DIV_SHIFT);
7963
			       DPLL_FPA01_P1_POST_DIV_SHIFT);
7337
 
7964
 
7338
		switch (dpll & DPLL_MODE_MASK) {
7965
		switch (dpll & DPLL_MODE_MASK) {
7339
		case DPLLB_MODE_DAC_SERIAL:
7966
		case DPLLB_MODE_DAC_SERIAL:
7340
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
7967
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
7341
				5 : 10;
7968
				5 : 10;
7342
			break;
7969
			break;
7343
		case DPLLB_MODE_LVDS:
7970
		case DPLLB_MODE_LVDS:
7344
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
7971
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
7345
				7 : 14;
7972
				7 : 14;
7346
			break;
7973
			break;
7347
		default:
7974
		default:
7348
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
7975
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
7349
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
7976
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
7350
			pipe_config->adjusted_mode.clock = 0;
-
 
7351
			return;
7977
			return;
7352
		}
7978
		}
7353
 
7979
 
7354
		if (IS_PINEVIEW(dev))
7980
		if (IS_PINEVIEW(dev))
7355
			pineview_clock(96000, &clock);
7981
			pineview_clock(refclk, &clock);
7356
		else
7982
		else
7357
			i9xx_clock(96000, &clock);
7983
			i9xx_clock(refclk, &clock);
7358
	} else {
7984
	} else {
-
 
7985
		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
7359
		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
7986
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
7360
 
7987
 
7361
		if (is_lvds) {
7988
		if (is_lvds) {
7362
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
7989
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
7363
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
7990
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
7364
			clock.p2 = 14;
-
 
7365
 
7991
 
7366
			if ((dpll & PLL_REF_INPUT_MASK) ==
-
 
7367
			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
-
 
7368
				/* XXX: might not be 66MHz */
7992
			if (lvds & LVDS_CLKB_POWER_UP)
7369
				i9xx_clock(66000, &clock);
7993
				clock.p2 = 7;
7370
			} else
7994
			else
7371
				i9xx_clock(48000, &clock);
7995
			clock.p2 = 14;
7372
		} else {
7996
		} else {
7373
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
7997
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
7374
				clock.p1 = 2;
7998
				clock.p1 = 2;
7375
			else {
7999
			else {
7376
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
8000
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
7377
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
8001
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
7378
			}
8002
			}
7379
			if (dpll & PLL_P2_DIVIDE_BY_4)
8003
			if (dpll & PLL_P2_DIVIDE_BY_4)
7380
				clock.p2 = 4;
8004
				clock.p2 = 4;
7381
			else
8005
			else
7382
				clock.p2 = 2;
8006
				clock.p2 = 2;
7383
 
-
 
7384
			i9xx_clock(48000, &clock);
-
 
7385
		}
8007
		}
-
 
8008
 
-
 
8009
		i9xx_clock(refclk, &clock);
7386
	}
8010
	}
-
 
8011
 
-
 
8012
	/*
-
 
8013
	 * This value includes pixel_multiplier. We will use
-
 
8014
	 * port_clock to compute adjusted_mode.crtc_clock in the
-
 
8015
	 * encoder's get_config() function.
7387
 
8016
	 */
7388
	pipe_config->adjusted_mode.clock = clock.dot;
8017
	pipe_config->port_clock = clock.dot;
7389
}
8018
}
7390
 
8019
 
7391
static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
8020
int intel_dotclock_calculate(int link_freq,
7392
				    struct intel_crtc_config *pipe_config)
-
 
7393
{
-
 
7394
	struct drm_device *dev = crtc->base.dev;
-
 
7395
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
7396
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
-
 
7397
	int link_freq, repeat;
-
 
7398
	u64 clock;
-
 
7399
	u32 link_m, link_n;
-
 
7400
 
-
 
7401
	repeat = pipe_config->pixel_multiplier;
8021
			     const struct intel_link_m_n *m_n)
7402
 
8022
{
7403
	/*
8023
	/*
7404
	 * The calculation for the data clock is:
8024
	 * The calculation for the data clock is:
7405
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes * repeat))/bpp
8025
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
7406
	 * But we want to avoid losing precison if possible, so:
8026
	 * But we want to avoid losing precison if possible, so:
7407
	 * pixel_clock = ((m * link_clock * nr_lanes * repeat)/(n*bpp))
8027
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
7408
	 *
8028
	 *
7409
	 * and the link clock is simpler:
8029
	 * and the link clock is simpler:
7410
	 * link_clock = (m * link_clock * repeat) / n
-
 
7411
	 */
-
 
7412
 
-
 
7413
	/*
-
 
7414
	 * We need to get the FDI or DP link clock here to derive
-
 
7415
	 * the M/N dividers.
-
 
7416
	 *
-
 
7417
	 * For FDI, we read it from the BIOS or use a fixed 2.7GHz.
-
 
7418
	 * For DP, it's either 1.62GHz or 2.7GHz.
8030
	 * link_clock = (m * link_clock) / n
7419
	 * We do our calculations in 10*MHz since we don't need much precison.
-
 
7420
	 */
8031
	 */
7421
	if (pipe_config->has_pch_encoder)
-
 
7422
		link_freq = intel_fdi_link_freq(dev) * 10000;
-
 
7423
	else
8032
 
-
 
8033
	if (!m_n->link_n)
-
 
8034
		return 0;
7424
		link_freq = pipe_config->port_clock;
8035
 
7425
 
8036
	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
-
 
8037
}
7426
	link_m = I915_READ(PIPE_LINK_M1(cpu_transcoder));
8038
 
7427
	link_n = I915_READ(PIPE_LINK_N1(cpu_transcoder));
8039
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
-
 
8040
				   struct intel_crtc_config *pipe_config)
-
 
8041
{
-
 
8042
	struct drm_device *dev = crtc->base.dev;
-
 
8043
 
-
 
8044
	/* read out port_clock from the DPLL */
-
 
8045
	i9xx_crtc_clock_get(crtc, pipe_config);
7428
 
8046
 
-
 
8047
	/*
-
 
8048
	 * This value does not include pixel_multiplier.
7429
	if (!link_m || !link_n)
8049
	 * We will check that port_clock and adjusted_mode.crtc_clock
7430
		return;
8050
	 * agree once we know their relationship in the encoder's
7431
 
8051
	 * get_config() function.
7432
	clock = ((u64)link_m * (u64)link_freq * (u64)repeat);
8052
	 */
7433
	do_div(clock, link_n);
8053
	pipe_config->adjusted_mode.crtc_clock =
7434
 
8054
		intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
7435
	pipe_config->adjusted_mode.clock = clock;
8055
					 &pipe_config->fdi_m_n);
7436
}
8056
}
7437
 
8057
 
7438
/** Returns the currently programmed mode of the given pipe. */
8058
/** Returns the currently programmed mode of the given pipe. */
7439
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
8059
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
7440
					     struct drm_crtc *crtc)
8060
					     struct drm_crtc *crtc)
7441
{
8061
{
7442
	struct drm_i915_private *dev_priv = dev->dev_private;
8062
	struct drm_i915_private *dev_priv = dev->dev_private;
7443
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8063
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7444
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
8064
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
7445
	struct drm_display_mode *mode;
8065
	struct drm_display_mode *mode;
7446
	struct intel_crtc_config pipe_config;
8066
	struct intel_crtc_config pipe_config;
7447
	int htot = I915_READ(HTOTAL(cpu_transcoder));
8067
	int htot = I915_READ(HTOTAL(cpu_transcoder));
7448
	int hsync = I915_READ(HSYNC(cpu_transcoder));
8068
	int hsync = I915_READ(HSYNC(cpu_transcoder));
7449
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
8069
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
7450
	int vsync = I915_READ(VSYNC(cpu_transcoder));
8070
	int vsync = I915_READ(VSYNC(cpu_transcoder));
-
 
8071
	enum pipe pipe = intel_crtc->pipe;
7451
 
8072
 
7452
	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
8073
	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
7453
	if (!mode)
8074
	if (!mode)
7454
		return NULL;
8075
		return NULL;
7455
 
8076
 
7456
	/*
8077
	/*
7457
	 * Construct a pipe_config sufficient for getting the clock info
8078
	 * Construct a pipe_config sufficient for getting the clock info
7458
	 * back out of crtc_clock_get.
8079
	 * back out of crtc_clock_get.
7459
	 *
8080
	 *
7460
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
8081
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
7461
	 * to use a real value here instead.
8082
	 * to use a real value here instead.
7462
	 */
8083
	 */
7463
	pipe_config.cpu_transcoder = (enum transcoder) intel_crtc->pipe;
8084
	pipe_config.cpu_transcoder = (enum transcoder) pipe;
7464
	pipe_config.pixel_multiplier = 1;
8085
	pipe_config.pixel_multiplier = 1;
-
 
8086
	pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
-
 
8087
	pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
-
 
8088
	pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
7465
	i9xx_crtc_clock_get(intel_crtc, &pipe_config);
8089
	i9xx_crtc_clock_get(intel_crtc, &pipe_config);
7466
 
8090
 
7467
	mode->clock = pipe_config.adjusted_mode.clock;
8091
	mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
7468
	mode->hdisplay = (htot & 0xffff) + 1;
8092
	mode->hdisplay = (htot & 0xffff) + 1;
7469
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
8093
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
7470
	mode->hsync_start = (hsync & 0xffff) + 1;
8094
	mode->hsync_start = (hsync & 0xffff) + 1;
7471
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
8095
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
7472
	mode->vdisplay = (vtot & 0xffff) + 1;
8096
	mode->vdisplay = (vtot & 0xffff) + 1;
7473
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
8097
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
7474
	mode->vsync_start = (vsync & 0xffff) + 1;
8098
	mode->vsync_start = (vsync & 0xffff) + 1;
7475
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
8099
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
7476
 
8100
 
7477
	drm_mode_set_name(mode);
8101
	drm_mode_set_name(mode);
7478
 
8102
 
7479
	return mode;
8103
	return mode;
7480
}
8104
}
7481
 
8105
 
7482
static void intel_increase_pllclock(struct drm_crtc *crtc)
8106
static void intel_increase_pllclock(struct drm_crtc *crtc)
7483
{
8107
{
7484
	struct drm_device *dev = crtc->dev;
8108
	struct drm_device *dev = crtc->dev;
7485
	drm_i915_private_t *dev_priv = dev->dev_private;
8109
	drm_i915_private_t *dev_priv = dev->dev_private;
7486
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8110
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7487
	int pipe = intel_crtc->pipe;
8111
	int pipe = intel_crtc->pipe;
7488
	int dpll_reg = DPLL(pipe);
8112
	int dpll_reg = DPLL(pipe);
7489
	int dpll;
8113
	int dpll;
7490
 
8114
 
7491
	if (HAS_PCH_SPLIT(dev))
8115
	if (HAS_PCH_SPLIT(dev))
7492
		return;
8116
		return;
7493
 
8117
 
7494
	if (!dev_priv->lvds_downclock_avail)
8118
	if (!dev_priv->lvds_downclock_avail)
7495
		return;
8119
		return;
7496
 
8120
 
7497
	dpll = I915_READ(dpll_reg);
8121
	dpll = I915_READ(dpll_reg);
7498
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
8122
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
7499
		DRM_DEBUG_DRIVER("upclocking LVDS\n");
8123
		DRM_DEBUG_DRIVER("upclocking LVDS\n");
7500
 
8124
 
7501
		assert_panel_unlocked(dev_priv, pipe);
8125
		assert_panel_unlocked(dev_priv, pipe);
7502
 
8126
 
7503
		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
8127
		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
7504
		I915_WRITE(dpll_reg, dpll);
8128
		I915_WRITE(dpll_reg, dpll);
7505
		intel_wait_for_vblank(dev, pipe);
8129
		intel_wait_for_vblank(dev, pipe);
7506
 
8130
 
7507
		dpll = I915_READ(dpll_reg);
8131
		dpll = I915_READ(dpll_reg);
7508
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
8132
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
7509
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
8133
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
7510
	}
8134
	}
7511
}
8135
}
7512
 
8136
 
7513
static void intel_decrease_pllclock(struct drm_crtc *crtc)
8137
static void intel_decrease_pllclock(struct drm_crtc *crtc)
7514
{
8138
{
7515
	struct drm_device *dev = crtc->dev;
8139
	struct drm_device *dev = crtc->dev;
7516
	drm_i915_private_t *dev_priv = dev->dev_private;
8140
	drm_i915_private_t *dev_priv = dev->dev_private;
7517
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8141
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7518
 
8142
 
7519
	if (HAS_PCH_SPLIT(dev))
8143
	if (HAS_PCH_SPLIT(dev))
7520
		return;
8144
		return;
7521
 
8145
 
7522
	if (!dev_priv->lvds_downclock_avail)
8146
	if (!dev_priv->lvds_downclock_avail)
7523
		return;
8147
		return;
7524
 
8148
 
7525
	/*
8149
	/*
7526
	 * Since this is called by a timer, we should never get here in
8150
	 * Since this is called by a timer, we should never get here in
7527
	 * the manual case.
8151
	 * the manual case.
7528
	 */
8152
	 */
7529
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
8153
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
7530
		int pipe = intel_crtc->pipe;
8154
		int pipe = intel_crtc->pipe;
7531
		int dpll_reg = DPLL(pipe);
8155
		int dpll_reg = DPLL(pipe);
7532
		int dpll;
8156
		int dpll;
7533
 
8157
 
7534
		DRM_DEBUG_DRIVER("downclocking LVDS\n");
8158
		DRM_DEBUG_DRIVER("downclocking LVDS\n");
7535
 
8159
 
7536
		assert_panel_unlocked(dev_priv, pipe);
8160
		assert_panel_unlocked(dev_priv, pipe);
7537
 
8161
 
7538
		dpll = I915_READ(dpll_reg);
8162
		dpll = I915_READ(dpll_reg);
7539
		dpll |= DISPLAY_RATE_SELECT_FPA1;
8163
		dpll |= DISPLAY_RATE_SELECT_FPA1;
7540
		I915_WRITE(dpll_reg, dpll);
8164
		I915_WRITE(dpll_reg, dpll);
7541
		intel_wait_for_vblank(dev, pipe);
8165
		intel_wait_for_vblank(dev, pipe);
7542
		dpll = I915_READ(dpll_reg);
8166
		dpll = I915_READ(dpll_reg);
7543
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
8167
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
7544
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
8168
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
7545
	}
8169
	}
7546
 
8170
 
7547
}
8171
}
7548
 
8172
 
7549
void intel_mark_busy(struct drm_device *dev)
8173
void intel_mark_busy(struct drm_device *dev)
7550
{
8174
{
7551
	struct drm_i915_private *dev_priv = dev->dev_private;
8175
	struct drm_i915_private *dev_priv = dev->dev_private;
7552
 
8176
 
7553
	hsw_package_c8_gpu_busy(dev_priv);
8177
	hsw_package_c8_gpu_busy(dev_priv);
7554
	i915_update_gfx_val(dev_priv);
8178
	i915_update_gfx_val(dev_priv);
7555
}
8179
}
7556
 
8180
 
7557
void intel_mark_idle(struct drm_device *dev)
8181
void intel_mark_idle(struct drm_device *dev)
7558
{
8182
{
7559
	struct drm_i915_private *dev_priv = dev->dev_private;
8183
	struct drm_i915_private *dev_priv = dev->dev_private;
7560
	struct drm_crtc *crtc;
8184
	struct drm_crtc *crtc;
7561
 
8185
 
7562
	hsw_package_c8_gpu_idle(dev_priv);
8186
	hsw_package_c8_gpu_idle(dev_priv);
7563
 
8187
 
7564
	if (!i915_powersave)
8188
	if (!i915_powersave)
7565
		return;
8189
		return;
7566
 
8190
 
7567
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8191
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7568
		if (!crtc->fb)
8192
		if (!crtc->fb)
7569
			continue;
8193
			continue;
7570
 
8194
 
7571
		intel_decrease_pllclock(crtc);
8195
		intel_decrease_pllclock(crtc);
7572
	}
8196
	}
-
 
8197
 
-
 
8198
	if (dev_priv->info->gen >= 6)
-
 
8199
		gen6_rps_idle(dev->dev_private);
7573
}
8200
}
7574
 
8201
 
7575
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
8202
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
7576
			struct intel_ring_buffer *ring)
8203
			struct intel_ring_buffer *ring)
7577
{
8204
{
7578
	struct drm_device *dev = obj->base.dev;
8205
	struct drm_device *dev = obj->base.dev;
7579
	struct drm_crtc *crtc;
8206
	struct drm_crtc *crtc;
7580
 
8207
 
7581
	if (!i915_powersave)
8208
	if (!i915_powersave)
7582
		return;
8209
		return;
7583
 
8210
 
7584
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8211
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7585
		if (!crtc->fb)
8212
		if (!crtc->fb)
7586
			continue;
8213
			continue;
7587
 
8214
 
7588
		if (to_intel_framebuffer(crtc->fb)->obj != obj)
8215
		if (to_intel_framebuffer(crtc->fb)->obj != obj)
7589
			continue;
8216
			continue;
7590
 
8217
 
7591
			intel_increase_pllclock(crtc);
8218
			intel_increase_pllclock(crtc);
7592
		if (ring && intel_fbc_enabled(dev))
8219
		if (ring && intel_fbc_enabled(dev))
7593
			ring->fbc_dirty = true;
8220
			ring->fbc_dirty = true;
7594
	}
8221
	}
7595
}
8222
}
7596
 
8223
 
7597
static void intel_crtc_destroy(struct drm_crtc *crtc)
8224
static void intel_crtc_destroy(struct drm_crtc *crtc)
7598
{
8225
{
7599
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8226
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7600
	struct drm_device *dev = crtc->dev;
8227
	struct drm_device *dev = crtc->dev;
7601
	struct intel_unpin_work *work;
8228
	struct intel_unpin_work *work;
7602
	unsigned long flags;
8229
	unsigned long flags;
7603
 
8230
 
7604
	spin_lock_irqsave(&dev->event_lock, flags);
8231
	spin_lock_irqsave(&dev->event_lock, flags);
7605
	work = intel_crtc->unpin_work;
8232
	work = intel_crtc->unpin_work;
7606
	intel_crtc->unpin_work = NULL;
8233
	intel_crtc->unpin_work = NULL;
7607
	spin_unlock_irqrestore(&dev->event_lock, flags);
8234
	spin_unlock_irqrestore(&dev->event_lock, flags);
7608
 
8235
 
7609
	if (work) {
8236
	if (work) {
7610
		cancel_work_sync(&work->work);
8237
		cancel_work_sync(&work->work);
7611
		kfree(work);
8238
		kfree(work);
7612
	}
8239
	}
7613
 
8240
 
7614
	drm_crtc_cleanup(crtc);
8241
	drm_crtc_cleanup(crtc);
7615
 
8242
 
7616
	kfree(intel_crtc);
8243
	kfree(intel_crtc);
7617
}
8244
}
7618
 
8245
 
7619
#if 0
8246
#if 0
7620
static void intel_unpin_work_fn(struct work_struct *__work)
8247
static void intel_unpin_work_fn(struct work_struct *__work)
7621
{
8248
{
7622
	struct intel_unpin_work *work =
8249
	struct intel_unpin_work *work =
7623
		container_of(__work, struct intel_unpin_work, work);
8250
		container_of(__work, struct intel_unpin_work, work);
7624
	struct drm_device *dev = work->crtc->dev;
8251
	struct drm_device *dev = work->crtc->dev;
7625
 
8252
 
7626
	mutex_lock(&dev->struct_mutex);
8253
	mutex_lock(&dev->struct_mutex);
7627
	intel_unpin_fb_obj(work->old_fb_obj);
8254
	intel_unpin_fb_obj(work->old_fb_obj);
7628
	drm_gem_object_unreference(&work->pending_flip_obj->base);
8255
	drm_gem_object_unreference(&work->pending_flip_obj->base);
7629
	drm_gem_object_unreference(&work->old_fb_obj->base);
8256
	drm_gem_object_unreference(&work->old_fb_obj->base);
7630
 
8257
 
7631
	intel_update_fbc(dev);
8258
	intel_update_fbc(dev);
7632
	mutex_unlock(&dev->struct_mutex);
8259
	mutex_unlock(&dev->struct_mutex);
7633
 
8260
 
7634
	BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
8261
	BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
7635
	atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
8262
	atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
7636
 
8263
 
7637
	kfree(work);
8264
	kfree(work);
7638
}
8265
}
7639
 
8266
 
7640
static void do_intel_finish_page_flip(struct drm_device *dev,
8267
static void do_intel_finish_page_flip(struct drm_device *dev,
7641
				      struct drm_crtc *crtc)
8268
				      struct drm_crtc *crtc)
7642
{
8269
{
7643
	drm_i915_private_t *dev_priv = dev->dev_private;
8270
	drm_i915_private_t *dev_priv = dev->dev_private;
7644
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8271
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7645
	struct intel_unpin_work *work;
8272
	struct intel_unpin_work *work;
7646
	unsigned long flags;
8273
	unsigned long flags;
7647
 
8274
 
7648
	/* Ignore early vblank irqs */
8275
	/* Ignore early vblank irqs */
7649
	if (intel_crtc == NULL)
8276
	if (intel_crtc == NULL)
7650
		return;
8277
		return;
7651
 
8278
 
7652
	spin_lock_irqsave(&dev->event_lock, flags);
8279
	spin_lock_irqsave(&dev->event_lock, flags);
7653
	work = intel_crtc->unpin_work;
8280
	work = intel_crtc->unpin_work;
7654
 
8281
 
7655
	/* Ensure we don't miss a work->pending update ... */
8282
	/* Ensure we don't miss a work->pending update ... */
7656
	smp_rmb();
8283
	smp_rmb();
7657
 
8284
 
7658
	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
8285
	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
7659
		spin_unlock_irqrestore(&dev->event_lock, flags);
8286
		spin_unlock_irqrestore(&dev->event_lock, flags);
7660
		return;
8287
		return;
7661
	}
8288
	}
7662
 
8289
 
7663
	/* and that the unpin work is consistent wrt ->pending. */
8290
	/* and that the unpin work is consistent wrt ->pending. */
7664
	smp_rmb();
8291
	smp_rmb();
7665
 
8292
 
7666
	intel_crtc->unpin_work = NULL;
8293
	intel_crtc->unpin_work = NULL;
7667
 
8294
 
7668
	if (work->event)
8295
	if (work->event)
7669
		drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
8296
		drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
7670
 
8297
 
7671
	drm_vblank_put(dev, intel_crtc->pipe);
8298
	drm_vblank_put(dev, intel_crtc->pipe);
7672
 
8299
 
7673
	spin_unlock_irqrestore(&dev->event_lock, flags);
8300
	spin_unlock_irqrestore(&dev->event_lock, flags);
7674
 
8301
 
7675
	wake_up_all(&dev_priv->pending_flip_queue);
8302
	wake_up_all(&dev_priv->pending_flip_queue);
7676
 
8303
 
7677
	queue_work(dev_priv->wq, &work->work);
8304
	queue_work(dev_priv->wq, &work->work);
7678
 
8305
 
7679
	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
8306
	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
7680
}
8307
}
7681
 
8308
 
7682
void intel_finish_page_flip(struct drm_device *dev, int pipe)
8309
void intel_finish_page_flip(struct drm_device *dev, int pipe)
7683
{
8310
{
7684
	drm_i915_private_t *dev_priv = dev->dev_private;
8311
	drm_i915_private_t *dev_priv = dev->dev_private;
7685
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
8312
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
7686
 
8313
 
7687
	do_intel_finish_page_flip(dev, crtc);
8314
	do_intel_finish_page_flip(dev, crtc);
7688
}
8315
}
7689
 
8316
 
7690
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
8317
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
7691
{
8318
{
7692
	drm_i915_private_t *dev_priv = dev->dev_private;
8319
	drm_i915_private_t *dev_priv = dev->dev_private;
7693
	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
8320
	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
7694
 
8321
 
7695
	do_intel_finish_page_flip(dev, crtc);
8322
	do_intel_finish_page_flip(dev, crtc);
7696
}
8323
}
7697
 
8324
 
7698
void intel_prepare_page_flip(struct drm_device *dev, int plane)
8325
void intel_prepare_page_flip(struct drm_device *dev, int plane)
7699
{
8326
{
7700
	drm_i915_private_t *dev_priv = dev->dev_private;
8327
	drm_i915_private_t *dev_priv = dev->dev_private;
7701
	struct intel_crtc *intel_crtc =
8328
	struct intel_crtc *intel_crtc =
7702
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
8329
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
7703
	unsigned long flags;
8330
	unsigned long flags;
7704
 
8331
 
7705
	/* NB: An MMIO update of the plane base pointer will also
8332
	/* NB: An MMIO update of the plane base pointer will also
7706
	 * generate a page-flip completion irq, i.e. every modeset
8333
	 * generate a page-flip completion irq, i.e. every modeset
7707
	 * is also accompanied by a spurious intel_prepare_page_flip().
8334
	 * is also accompanied by a spurious intel_prepare_page_flip().
7708
	 */
8335
	 */
7709
	spin_lock_irqsave(&dev->event_lock, flags);
8336
	spin_lock_irqsave(&dev->event_lock, flags);
7710
	if (intel_crtc->unpin_work)
8337
	if (intel_crtc->unpin_work)
7711
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
8338
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
7712
	spin_unlock_irqrestore(&dev->event_lock, flags);
8339
	spin_unlock_irqrestore(&dev->event_lock, flags);
7713
}
8340
}
7714
 
8341
 
7715
inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
8342
inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
7716
{
8343
{
7717
	/* Ensure that the work item is consistent when activating it ... */
8344
	/* Ensure that the work item is consistent when activating it ... */
7718
	smp_wmb();
8345
	smp_wmb();
7719
	atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
8346
	atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
7720
	/* and that it is marked active as soon as the irq could fire. */
8347
	/* and that it is marked active as soon as the irq could fire. */
7721
	smp_wmb();
8348
	smp_wmb();
7722
}
8349
}
7723
 
8350
 
7724
static int intel_gen2_queue_flip(struct drm_device *dev,
8351
static int intel_gen2_queue_flip(struct drm_device *dev,
7725
				 struct drm_crtc *crtc,
8352
				 struct drm_crtc *crtc,
7726
				 struct drm_framebuffer *fb,
8353
				 struct drm_framebuffer *fb,
7727
				 struct drm_i915_gem_object *obj,
8354
				 struct drm_i915_gem_object *obj,
7728
				 uint32_t flags)
8355
				 uint32_t flags)
7729
{
8356
{
7730
	struct drm_i915_private *dev_priv = dev->dev_private;
8357
	struct drm_i915_private *dev_priv = dev->dev_private;
7731
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8358
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7732
	u32 flip_mask;
8359
	u32 flip_mask;
7733
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8360
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
7734
	int ret;
8361
	int ret;
7735
 
8362
 
7736
	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8363
	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7737
	if (ret)
8364
	if (ret)
7738
		goto err;
8365
		goto err;
7739
 
8366
 
7740
	ret = intel_ring_begin(ring, 6);
8367
	ret = intel_ring_begin(ring, 6);
7741
	if (ret)
8368
	if (ret)
7742
		goto err_unpin;
8369
		goto err_unpin;
7743
 
8370
 
7744
	/* Can't queue multiple flips, so wait for the previous
8371
	/* Can't queue multiple flips, so wait for the previous
7745
	 * one to finish before executing the next.
8372
	 * one to finish before executing the next.
7746
	 */
8373
	 */
7747
	if (intel_crtc->plane)
8374
	if (intel_crtc->plane)
7748
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
8375
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
7749
	else
8376
	else
7750
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
8377
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
7751
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
8378
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
7752
	intel_ring_emit(ring, MI_NOOP);
8379
	intel_ring_emit(ring, MI_NOOP);
7753
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
8380
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
7754
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
8381
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7755
	intel_ring_emit(ring, fb->pitches[0]);
8382
	intel_ring_emit(ring, fb->pitches[0]);
7756
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
8383
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
7757
	intel_ring_emit(ring, 0); /* aux display base address, unused */
8384
	intel_ring_emit(ring, 0); /* aux display base address, unused */
7758
 
8385
 
7759
	intel_mark_page_flip_active(intel_crtc);
8386
	intel_mark_page_flip_active(intel_crtc);
7760
	intel_ring_advance(ring);
8387
	__intel_ring_advance(ring);
7761
	return 0;
8388
	return 0;
7762
 
8389
 
7763
err_unpin:
8390
err_unpin:
7764
	intel_unpin_fb_obj(obj);
8391
	intel_unpin_fb_obj(obj);
7765
err:
8392
err:
7766
	return ret;
8393
	return ret;
7767
}
8394
}
7768
 
8395
 
7769
static int intel_gen3_queue_flip(struct drm_device *dev,
8396
static int intel_gen3_queue_flip(struct drm_device *dev,
7770
				 struct drm_crtc *crtc,
8397
				 struct drm_crtc *crtc,
7771
				 struct drm_framebuffer *fb,
8398
				 struct drm_framebuffer *fb,
7772
				 struct drm_i915_gem_object *obj,
8399
				 struct drm_i915_gem_object *obj,
7773
				 uint32_t flags)
8400
				 uint32_t flags)
7774
{
8401
{
7775
	struct drm_i915_private *dev_priv = dev->dev_private;
8402
	struct drm_i915_private *dev_priv = dev->dev_private;
7776
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8403
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7777
	u32 flip_mask;
8404
	u32 flip_mask;
7778
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8405
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
7779
	int ret;
8406
	int ret;
7780
 
8407
 
7781
	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8408
	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7782
	if (ret)
8409
	if (ret)
7783
		goto err;
8410
		goto err;
7784
 
8411
 
7785
	ret = intel_ring_begin(ring, 6);
8412
	ret = intel_ring_begin(ring, 6);
7786
	if (ret)
8413
	if (ret)
7787
		goto err_unpin;
8414
		goto err_unpin;
7788
 
8415
 
7789
	if (intel_crtc->plane)
8416
	if (intel_crtc->plane)
7790
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
8417
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
7791
	else
8418
	else
7792
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
8419
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
7793
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
8420
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
7794
	intel_ring_emit(ring, MI_NOOP);
8421
	intel_ring_emit(ring, MI_NOOP);
7795
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
8422
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
7796
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
8423
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7797
	intel_ring_emit(ring, fb->pitches[0]);
8424
	intel_ring_emit(ring, fb->pitches[0]);
7798
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
8425
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
7799
	intel_ring_emit(ring, MI_NOOP);
8426
	intel_ring_emit(ring, MI_NOOP);
7800
 
8427
 
7801
	intel_mark_page_flip_active(intel_crtc);
8428
	intel_mark_page_flip_active(intel_crtc);
7802
	intel_ring_advance(ring);
8429
	__intel_ring_advance(ring);
7803
	return 0;
8430
	return 0;
7804
 
8431
 
7805
err_unpin:
8432
err_unpin:
7806
	intel_unpin_fb_obj(obj);
8433
	intel_unpin_fb_obj(obj);
7807
err:
8434
err:
7808
	return ret;
8435
	return ret;
7809
}
8436
}
7810
 
8437
 
7811
static int intel_gen4_queue_flip(struct drm_device *dev,
8438
static int intel_gen4_queue_flip(struct drm_device *dev,
7812
				 struct drm_crtc *crtc,
8439
				 struct drm_crtc *crtc,
7813
				 struct drm_framebuffer *fb,
8440
				 struct drm_framebuffer *fb,
7814
				 struct drm_i915_gem_object *obj,
8441
				 struct drm_i915_gem_object *obj,
7815
				 uint32_t flags)
8442
				 uint32_t flags)
7816
{
8443
{
7817
	struct drm_i915_private *dev_priv = dev->dev_private;
8444
	struct drm_i915_private *dev_priv = dev->dev_private;
7818
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8445
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7819
	uint32_t pf, pipesrc;
8446
	uint32_t pf, pipesrc;
7820
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8447
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
7821
	int ret;
8448
	int ret;
7822
 
8449
 
7823
	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8450
	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7824
	if (ret)
8451
	if (ret)
7825
		goto err;
8452
		goto err;
7826
 
8453
 
7827
	ret = intel_ring_begin(ring, 4);
8454
	ret = intel_ring_begin(ring, 4);
7828
	if (ret)
8455
	if (ret)
7829
		goto err_unpin;
8456
		goto err_unpin;
7830
 
8457
 
7831
	/* i965+ uses the linear or tiled offsets from the
8458
	/* i965+ uses the linear or tiled offsets from the
7832
	 * Display Registers (which do not change across a page-flip)
8459
	 * Display Registers (which do not change across a page-flip)
7833
	 * so we need only reprogram the base address.
8460
	 * so we need only reprogram the base address.
7834
	 */
8461
	 */
7835
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
8462
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
7836
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
8463
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7837
	intel_ring_emit(ring, fb->pitches[0]);
8464
	intel_ring_emit(ring, fb->pitches[0]);
7838
	intel_ring_emit(ring,
8465
	intel_ring_emit(ring,
7839
			(i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset) |
8466
			(i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset) |
7840
			obj->tiling_mode);
8467
			obj->tiling_mode);
7841
 
8468
 
7842
	/* XXX Enabling the panel-fitter across page-flip is so far
8469
	/* XXX Enabling the panel-fitter across page-flip is so far
7843
	 * untested on non-native modes, so ignore it for now.
8470
	 * untested on non-native modes, so ignore it for now.
7844
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
8471
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
7845
	 */
8472
	 */
7846
	pf = 0;
8473
	pf = 0;
7847
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
8474
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
7848
	intel_ring_emit(ring, pf | pipesrc);
8475
	intel_ring_emit(ring, pf | pipesrc);
7849
 
8476
 
7850
	intel_mark_page_flip_active(intel_crtc);
8477
	intel_mark_page_flip_active(intel_crtc);
7851
	intel_ring_advance(ring);
8478
	__intel_ring_advance(ring);
7852
	return 0;
8479
	return 0;
7853
 
8480
 
7854
err_unpin:
8481
err_unpin:
7855
	intel_unpin_fb_obj(obj);
8482
	intel_unpin_fb_obj(obj);
7856
err:
8483
err:
7857
	return ret;
8484
	return ret;
7858
}
8485
}
7859
 
8486
 
7860
static int intel_gen6_queue_flip(struct drm_device *dev,
8487
static int intel_gen6_queue_flip(struct drm_device *dev,
7861
				 struct drm_crtc *crtc,
8488
				 struct drm_crtc *crtc,
7862
				 struct drm_framebuffer *fb,
8489
				 struct drm_framebuffer *fb,
7863
				 struct drm_i915_gem_object *obj,
8490
				 struct drm_i915_gem_object *obj,
7864
				 uint32_t flags)
8491
				 uint32_t flags)
7865
{
8492
{
7866
	struct drm_i915_private *dev_priv = dev->dev_private;
8493
	struct drm_i915_private *dev_priv = dev->dev_private;
7867
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8494
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7868
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8495
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
7869
	uint32_t pf, pipesrc;
8496
	uint32_t pf, pipesrc;
7870
	int ret;
8497
	int ret;
7871
 
8498
 
7872
	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8499
	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7873
	if (ret)
8500
	if (ret)
7874
		goto err;
8501
		goto err;
7875
 
8502
 
7876
	ret = intel_ring_begin(ring, 4);
8503
	ret = intel_ring_begin(ring, 4);
7877
	if (ret)
8504
	if (ret)
7878
		goto err_unpin;
8505
		goto err_unpin;
7879
 
8506
 
7880
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
8507
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
7881
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
8508
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7882
	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
8509
	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
7883
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
8510
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
7884
 
8511
 
7885
	/* Contrary to the suggestions in the documentation,
8512
	/* Contrary to the suggestions in the documentation,
7886
	 * "Enable Panel Fitter" does not seem to be required when page
8513
	 * "Enable Panel Fitter" does not seem to be required when page
7887
	 * flipping with a non-native mode, and worse causes a normal
8514
	 * flipping with a non-native mode, and worse causes a normal
7888
	 * modeset to fail.
8515
	 * modeset to fail.
7889
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
8516
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
7890
	 */
8517
	 */
7891
	pf = 0;
8518
	pf = 0;
7892
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
8519
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
7893
	intel_ring_emit(ring, pf | pipesrc);
8520
	intel_ring_emit(ring, pf | pipesrc);
7894
 
8521
 
7895
	intel_mark_page_flip_active(intel_crtc);
8522
	intel_mark_page_flip_active(intel_crtc);
7896
	intel_ring_advance(ring);
8523
	__intel_ring_advance(ring);
7897
	return 0;
8524
	return 0;
7898
 
8525
 
7899
err_unpin:
8526
err_unpin:
7900
	intel_unpin_fb_obj(obj);
8527
	intel_unpin_fb_obj(obj);
7901
err:
8528
err:
7902
	return ret;
8529
	return ret;
7903
}
8530
}
7904
 
8531
 
7905
static int intel_gen7_queue_flip(struct drm_device *dev,
8532
static int intel_gen7_queue_flip(struct drm_device *dev,
7906
				 struct drm_crtc *crtc,
8533
				 struct drm_crtc *crtc,
7907
				 struct drm_framebuffer *fb,
8534
				 struct drm_framebuffer *fb,
7908
				 struct drm_i915_gem_object *obj,
8535
				 struct drm_i915_gem_object *obj,
7909
				 uint32_t flags)
8536
				 uint32_t flags)
7910
{
8537
{
7911
	struct drm_i915_private *dev_priv = dev->dev_private;
8538
	struct drm_i915_private *dev_priv = dev->dev_private;
7912
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8539
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7913
	struct intel_ring_buffer *ring;
8540
	struct intel_ring_buffer *ring;
7914
	uint32_t plane_bit = 0;
8541
	uint32_t plane_bit = 0;
7915
	int len, ret;
8542
	int len, ret;
7916
 
8543
 
7917
	ring = obj->ring;
8544
	ring = obj->ring;
7918
	if (IS_VALLEYVIEW(dev) || ring == NULL || ring->id != RCS)
8545
	if (IS_VALLEYVIEW(dev) || ring == NULL || ring->id != RCS)
7919
		ring = &dev_priv->ring[BCS];
8546
		ring = &dev_priv->ring[BCS];
7920
 
8547
 
7921
	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8548
	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7922
	if (ret)
8549
	if (ret)
7923
		goto err;
8550
		goto err;
7924
 
8551
 
7925
	switch(intel_crtc->plane) {
8552
	switch(intel_crtc->plane) {
7926
	case PLANE_A:
8553
	case PLANE_A:
7927
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
8554
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
7928
		break;
8555
		break;
7929
	case PLANE_B:
8556
	case PLANE_B:
7930
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
8557
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
7931
		break;
8558
		break;
7932
	case PLANE_C:
8559
	case PLANE_C:
7933
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
8560
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
7934
		break;
8561
		break;
7935
	default:
8562
	default:
7936
		WARN_ONCE(1, "unknown plane in flip command\n");
8563
		WARN_ONCE(1, "unknown plane in flip command\n");
7937
		ret = -ENODEV;
8564
		ret = -ENODEV;
7938
		goto err_unpin;
8565
		goto err_unpin;
7939
	}
8566
	}
7940
 
8567
 
7941
	len = 4;
8568
	len = 4;
7942
	if (ring->id == RCS)
8569
	if (ring->id == RCS)
7943
		len += 6;
8570
		len += 6;
7944
 
8571
 
7945
	ret = intel_ring_begin(ring, len);
8572
	ret = intel_ring_begin(ring, len);
7946
	if (ret)
8573
	if (ret)
7947
		goto err_unpin;
8574
		goto err_unpin;
7948
 
8575
 
7949
	/* Unmask the flip-done completion message. Note that the bspec says that
8576
	/* Unmask the flip-done completion message. Note that the bspec says that
7950
	 * we should do this for both the BCS and RCS, and that we must not unmask
8577
	 * we should do this for both the BCS and RCS, and that we must not unmask
7951
	 * more than one flip event at any time (or ensure that one flip message
8578
	 * more than one flip event at any time (or ensure that one flip message
7952
	 * can be sent by waiting for flip-done prior to queueing new flips).
8579
	 * can be sent by waiting for flip-done prior to queueing new flips).
7953
	 * Experimentation says that BCS works despite DERRMR masking all
8580
	 * Experimentation says that BCS works despite DERRMR masking all
7954
	 * flip-done completion events and that unmasking all planes at once
8581
	 * flip-done completion events and that unmasking all planes at once
7955
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
8582
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
7956
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
8583
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
7957
	 */
8584
	 */
7958
	if (ring->id == RCS) {
8585
	if (ring->id == RCS) {
7959
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
8586
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
7960
		intel_ring_emit(ring, DERRMR);
8587
		intel_ring_emit(ring, DERRMR);
7961
		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
8588
		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
7962
					DERRMR_PIPEB_PRI_FLIP_DONE |
8589
					DERRMR_PIPEB_PRI_FLIP_DONE |
7963
					DERRMR_PIPEC_PRI_FLIP_DONE));
8590
					DERRMR_PIPEC_PRI_FLIP_DONE));
7964
		intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1));
8591
		intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
-
 
8592
				MI_SRM_LRM_GLOBAL_GTT);
7965
		intel_ring_emit(ring, DERRMR);
8593
		intel_ring_emit(ring, DERRMR);
7966
		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
8594
		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
7967
	}
8595
	}
7968
 
8596
 
7969
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
8597
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
7970
	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
8598
	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
7971
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
8599
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
7972
	intel_ring_emit(ring, (MI_NOOP));
8600
	intel_ring_emit(ring, (MI_NOOP));
7973
 
8601
 
7974
	intel_mark_page_flip_active(intel_crtc);
8602
	intel_mark_page_flip_active(intel_crtc);
7975
	intel_ring_advance(ring);
8603
	__intel_ring_advance(ring);
7976
	return 0;
8604
	return 0;
7977
 
8605
 
7978
err_unpin:
8606
err_unpin:
7979
	intel_unpin_fb_obj(obj);
8607
	intel_unpin_fb_obj(obj);
7980
err:
8608
err:
7981
	return ret;
8609
	return ret;
7982
}
8610
}
7983
 
8611
 
7984
static int intel_default_queue_flip(struct drm_device *dev,
8612
static int intel_default_queue_flip(struct drm_device *dev,
7985
				    struct drm_crtc *crtc,
8613
				    struct drm_crtc *crtc,
7986
				    struct drm_framebuffer *fb,
8614
				    struct drm_framebuffer *fb,
7987
				    struct drm_i915_gem_object *obj,
8615
				    struct drm_i915_gem_object *obj,
7988
				    uint32_t flags)
8616
				    uint32_t flags)
7989
{
8617
{
7990
	return -ENODEV;
8618
	return -ENODEV;
7991
}
8619
}
7992
 
8620
 
7993
static int intel_crtc_page_flip(struct drm_crtc *crtc,
8621
static int intel_crtc_page_flip(struct drm_crtc *crtc,
7994
				struct drm_framebuffer *fb,
8622
				struct drm_framebuffer *fb,
7995
				struct drm_pending_vblank_event *event,
8623
				struct drm_pending_vblank_event *event,
7996
				uint32_t page_flip_flags)
8624
				uint32_t page_flip_flags)
7997
{
8625
{
7998
	struct drm_device *dev = crtc->dev;
8626
	struct drm_device *dev = crtc->dev;
7999
	struct drm_i915_private *dev_priv = dev->dev_private;
8627
	struct drm_i915_private *dev_priv = dev->dev_private;
8000
	struct drm_framebuffer *old_fb = crtc->fb;
8628
	struct drm_framebuffer *old_fb = crtc->fb;
8001
	struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
8629
	struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
8002
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8630
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8003
	struct intel_unpin_work *work;
8631
	struct intel_unpin_work *work;
8004
	unsigned long flags;
8632
	unsigned long flags;
8005
	int ret;
8633
	int ret;
8006
 
8634
 
8007
	/* Can't change pixel format via MI display flips. */
8635
	/* Can't change pixel format via MI display flips. */
8008
	if (fb->pixel_format != crtc->fb->pixel_format)
8636
	if (fb->pixel_format != crtc->fb->pixel_format)
8009
		return -EINVAL;
8637
		return -EINVAL;
8010
 
8638
 
8011
	/*
8639
	/*
8012
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
8640
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
8013
	 * Note that pitch changes could also affect these register.
8641
	 * Note that pitch changes could also affect these register.
8014
	 */
8642
	 */
8015
	if (INTEL_INFO(dev)->gen > 3 &&
8643
	if (INTEL_INFO(dev)->gen > 3 &&
8016
	    (fb->offsets[0] != crtc->fb->offsets[0] ||
8644
	    (fb->offsets[0] != crtc->fb->offsets[0] ||
8017
	     fb->pitches[0] != crtc->fb->pitches[0]))
8645
	     fb->pitches[0] != crtc->fb->pitches[0]))
8018
		return -EINVAL;
8646
		return -EINVAL;
8019
 
8647
 
8020
	work = kzalloc(sizeof *work, GFP_KERNEL);
8648
	work = kzalloc(sizeof(*work), GFP_KERNEL);
8021
	if (work == NULL)
8649
	if (work == NULL)
8022
		return -ENOMEM;
8650
		return -ENOMEM;
8023
 
8651
 
8024
	work->event = event;
8652
	work->event = event;
8025
	work->crtc = crtc;
8653
	work->crtc = crtc;
8026
	work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
8654
	work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
8027
	INIT_WORK(&work->work, intel_unpin_work_fn);
8655
	INIT_WORK(&work->work, intel_unpin_work_fn);
8028
 
8656
 
8029
	ret = drm_vblank_get(dev, intel_crtc->pipe);
8657
	ret = drm_vblank_get(dev, intel_crtc->pipe);
8030
	if (ret)
8658
	if (ret)
8031
		goto free_work;
8659
		goto free_work;
8032
 
8660
 
8033
	/* We borrow the event spin lock for protecting unpin_work */
8661
	/* We borrow the event spin lock for protecting unpin_work */
8034
	spin_lock_irqsave(&dev->event_lock, flags);
8662
	spin_lock_irqsave(&dev->event_lock, flags);
8035
	if (intel_crtc->unpin_work) {
8663
	if (intel_crtc->unpin_work) {
8036
		spin_unlock_irqrestore(&dev->event_lock, flags);
8664
		spin_unlock_irqrestore(&dev->event_lock, flags);
8037
		kfree(work);
8665
		kfree(work);
8038
		drm_vblank_put(dev, intel_crtc->pipe);
8666
		drm_vblank_put(dev, intel_crtc->pipe);
8039
 
8667
 
8040
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
8668
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
8041
		return -EBUSY;
8669
		return -EBUSY;
8042
	}
8670
	}
8043
	intel_crtc->unpin_work = work;
8671
	intel_crtc->unpin_work = work;
8044
	spin_unlock_irqrestore(&dev->event_lock, flags);
8672
	spin_unlock_irqrestore(&dev->event_lock, flags);
8045
 
8673
 
8046
	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
8674
	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
8047
		flush_workqueue(dev_priv->wq);
8675
		flush_workqueue(dev_priv->wq);
8048
 
8676
 
8049
	ret = i915_mutex_lock_interruptible(dev);
8677
	ret = i915_mutex_lock_interruptible(dev);
8050
	if (ret)
8678
	if (ret)
8051
		goto cleanup;
8679
		goto cleanup;
8052
 
8680
 
8053
	/* Reference the objects for the scheduled work. */
8681
	/* Reference the objects for the scheduled work. */
8054
	drm_gem_object_reference(&work->old_fb_obj->base);
8682
	drm_gem_object_reference(&work->old_fb_obj->base);
8055
	drm_gem_object_reference(&obj->base);
8683
	drm_gem_object_reference(&obj->base);
8056
 
8684
 
8057
	crtc->fb = fb;
8685
	crtc->fb = fb;
8058
 
8686
 
8059
	work->pending_flip_obj = obj;
8687
	work->pending_flip_obj = obj;
8060
 
8688
 
8061
	work->enable_stall_check = true;
8689
	work->enable_stall_check = true;
8062
 
8690
 
8063
	atomic_inc(&intel_crtc->unpin_work_count);
8691
	atomic_inc(&intel_crtc->unpin_work_count);
8064
	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
8692
	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
8065
 
8693
 
8066
	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, page_flip_flags);
8694
	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, page_flip_flags);
8067
	if (ret)
8695
	if (ret)
8068
		goto cleanup_pending;
8696
		goto cleanup_pending;
8069
 
8697
 
8070
	intel_disable_fbc(dev);
8698
	intel_disable_fbc(dev);
8071
	intel_mark_fb_busy(obj, NULL);
8699
	intel_mark_fb_busy(obj, NULL);
8072
	mutex_unlock(&dev->struct_mutex);
8700
	mutex_unlock(&dev->struct_mutex);
8073
 
8701
 
8074
	trace_i915_flip_request(intel_crtc->plane, obj);
8702
	trace_i915_flip_request(intel_crtc->plane, obj);
8075
 
8703
 
8076
	return 0;
8704
	return 0;
8077
 
8705
 
8078
cleanup_pending:
8706
cleanup_pending:
8079
	atomic_dec(&intel_crtc->unpin_work_count);
8707
	atomic_dec(&intel_crtc->unpin_work_count);
8080
	crtc->fb = old_fb;
8708
	crtc->fb = old_fb;
8081
	drm_gem_object_unreference(&work->old_fb_obj->base);
8709
	drm_gem_object_unreference(&work->old_fb_obj->base);
8082
	drm_gem_object_unreference(&obj->base);
8710
	drm_gem_object_unreference(&obj->base);
8083
	mutex_unlock(&dev->struct_mutex);
8711
	mutex_unlock(&dev->struct_mutex);
8084
 
8712
 
8085
cleanup:
8713
cleanup:
8086
	spin_lock_irqsave(&dev->event_lock, flags);
8714
	spin_lock_irqsave(&dev->event_lock, flags);
8087
	intel_crtc->unpin_work = NULL;
8715
	intel_crtc->unpin_work = NULL;
8088
	spin_unlock_irqrestore(&dev->event_lock, flags);
8716
	spin_unlock_irqrestore(&dev->event_lock, flags);
8089
 
8717
 
8090
	drm_vblank_put(dev, intel_crtc->pipe);
8718
	drm_vblank_put(dev, intel_crtc->pipe);
8091
free_work:
8719
free_work:
8092
	kfree(work);
8720
	kfree(work);
8093
 
8721
 
8094
	return ret;
8722
	return ret;
8095
}
8723
}
8096
#endif
8724
#endif
8097
 
8725
 
8098
static struct drm_crtc_helper_funcs intel_helper_funcs = {
8726
static struct drm_crtc_helper_funcs intel_helper_funcs = {
8099
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
8727
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
8100
	.load_lut = intel_crtc_load_lut,
8728
	.load_lut = intel_crtc_load_lut,
8101
};
8729
};
8102
 
-
 
8103
static bool intel_encoder_crtc_ok(struct drm_encoder *encoder,
-
 
8104
				  struct drm_crtc *crtc)
-
 
8105
{
-
 
8106
	struct drm_device *dev;
-
 
8107
	struct drm_crtc *tmp;
-
 
8108
	int crtc_mask = 1;
-
 
8109
 
-
 
8110
	WARN(!crtc, "checking null crtc?\n");
-
 
8111
 
-
 
8112
	dev = crtc->dev;
-
 
8113
 
-
 
8114
	list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
-
 
8115
		if (tmp == crtc)
-
 
8116
			break;
-
 
8117
		crtc_mask <<= 1;
-
 
8118
	}
-
 
8119
 
-
 
8120
	if (encoder->possible_crtcs & crtc_mask)
-
 
8121
		return true;
-
 
8122
	return false;
-
 
8123
}
-
 
8124
 
8730
 
8125
/**
8731
/**
8126
 * intel_modeset_update_staged_output_state
8732
 * intel_modeset_update_staged_output_state
8127
 *
8733
 *
8128
 * Updates the staged output configuration state, e.g. after we've read out the
8734
 * Updates the staged output configuration state, e.g. after we've read out the
8129
 * current hw state.
8735
 * current hw state.
8130
 */
8736
 */
8131
static void intel_modeset_update_staged_output_state(struct drm_device *dev)
8737
static void intel_modeset_update_staged_output_state(struct drm_device *dev)
8132
{
8738
{
8133
	struct intel_encoder *encoder;
8739
	struct intel_encoder *encoder;
8134
	struct intel_connector *connector;
8740
	struct intel_connector *connector;
8135
 
8741
 
8136
	list_for_each_entry(connector, &dev->mode_config.connector_list,
8742
	list_for_each_entry(connector, &dev->mode_config.connector_list,
8137
			    base.head) {
8743
			    base.head) {
8138
		connector->new_encoder =
8744
		connector->new_encoder =
8139
			to_intel_encoder(connector->base.encoder);
8745
			to_intel_encoder(connector->base.encoder);
8140
	}
8746
	}
8141
 
8747
 
8142
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8748
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8143
			    base.head) {
8749
			    base.head) {
8144
		encoder->new_crtc =
8750
		encoder->new_crtc =
8145
			to_intel_crtc(encoder->base.crtc);
8751
			to_intel_crtc(encoder->base.crtc);
8146
	}
8752
	}
8147
}
8753
}
8148
 
8754
 
8149
/**
8755
/**
8150
 * intel_modeset_commit_output_state
8756
 * intel_modeset_commit_output_state
8151
 *
8757
 *
8152
 * This function copies the stage display pipe configuration to the real one.
8758
 * This function copies the stage display pipe configuration to the real one.
8153
 */
8759
 */
8154
static void intel_modeset_commit_output_state(struct drm_device *dev)
8760
static void intel_modeset_commit_output_state(struct drm_device *dev)
8155
{
8761
{
8156
	struct intel_encoder *encoder;
8762
	struct intel_encoder *encoder;
8157
	struct intel_connector *connector;
8763
	struct intel_connector *connector;
8158
 
8764
 
8159
	list_for_each_entry(connector, &dev->mode_config.connector_list,
8765
	list_for_each_entry(connector, &dev->mode_config.connector_list,
8160
			    base.head) {
8766
			    base.head) {
8161
		connector->base.encoder = &connector->new_encoder->base;
8767
		connector->base.encoder = &connector->new_encoder->base;
8162
	}
8768
	}
8163
 
8769
 
8164
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8770
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8165
			    base.head) {
8771
			    base.head) {
8166
		encoder->base.crtc = &encoder->new_crtc->base;
8772
		encoder->base.crtc = &encoder->new_crtc->base;
8167
	}
8773
	}
8168
}
8774
}
8169
 
8775
 
8170
static void
8776
static void
8171
connected_sink_compute_bpp(struct intel_connector * connector,
8777
connected_sink_compute_bpp(struct intel_connector * connector,
8172
			   struct intel_crtc_config *pipe_config)
8778
			   struct intel_crtc_config *pipe_config)
8173
{
8779
{
8174
	int bpp = pipe_config->pipe_bpp;
8780
	int bpp = pipe_config->pipe_bpp;
8175
 
8781
 
8176
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
8782
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
8177
		connector->base.base.id,
8783
		connector->base.base.id,
8178
		drm_get_connector_name(&connector->base));
8784
		drm_get_connector_name(&connector->base));
8179
 
8785
 
8180
	/* Don't use an invalid EDID bpc value */
8786
	/* Don't use an invalid EDID bpc value */
8181
	if (connector->base.display_info.bpc &&
8787
	if (connector->base.display_info.bpc &&
8182
	    connector->base.display_info.bpc * 3 < bpp) {
8788
	    connector->base.display_info.bpc * 3 < bpp) {
8183
		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
8789
		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
8184
			      bpp, connector->base.display_info.bpc*3);
8790
			      bpp, connector->base.display_info.bpc*3);
8185
		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
8791
		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
8186
	}
8792
	}
8187
 
8793
 
8188
	/* Clamp bpp to 8 on screens without EDID 1.4 */
8794
	/* Clamp bpp to 8 on screens without EDID 1.4 */
8189
	if (connector->base.display_info.bpc == 0 && bpp > 24) {
8795
	if (connector->base.display_info.bpc == 0 && bpp > 24) {
8190
		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
8796
		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
8191
			      bpp);
8797
			      bpp);
8192
		pipe_config->pipe_bpp = 24;
8798
		pipe_config->pipe_bpp = 24;
8193
	}
8799
	}
8194
}
8800
}
8195
 
8801
 
8196
static int
8802
static int
8197
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
8803
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
8198
		    struct drm_framebuffer *fb,
8804
		    struct drm_framebuffer *fb,
8199
		    struct intel_crtc_config *pipe_config)
8805
		    struct intel_crtc_config *pipe_config)
8200
{
8806
{
8201
	struct drm_device *dev = crtc->base.dev;
8807
	struct drm_device *dev = crtc->base.dev;
8202
	struct intel_connector *connector;
8808
	struct intel_connector *connector;
8203
	int bpp;
8809
	int bpp;
8204
 
8810
 
8205
	switch (fb->pixel_format) {
8811
	switch (fb->pixel_format) {
8206
	case DRM_FORMAT_C8:
8812
	case DRM_FORMAT_C8:
8207
		bpp = 8*3; /* since we go through a colormap */
8813
		bpp = 8*3; /* since we go through a colormap */
8208
		break;
8814
		break;
8209
	case DRM_FORMAT_XRGB1555:
8815
	case DRM_FORMAT_XRGB1555:
8210
	case DRM_FORMAT_ARGB1555:
8816
	case DRM_FORMAT_ARGB1555:
8211
		/* checked in intel_framebuffer_init already */
8817
		/* checked in intel_framebuffer_init already */
8212
		if (WARN_ON(INTEL_INFO(dev)->gen > 3))
8818
		if (WARN_ON(INTEL_INFO(dev)->gen > 3))
8213
			return -EINVAL;
8819
			return -EINVAL;
8214
	case DRM_FORMAT_RGB565:
8820
	case DRM_FORMAT_RGB565:
8215
		bpp = 6*3; /* min is 18bpp */
8821
		bpp = 6*3; /* min is 18bpp */
8216
		break;
8822
		break;
8217
	case DRM_FORMAT_XBGR8888:
8823
	case DRM_FORMAT_XBGR8888:
8218
	case DRM_FORMAT_ABGR8888:
8824
	case DRM_FORMAT_ABGR8888:
8219
		/* checked in intel_framebuffer_init already */
8825
		/* checked in intel_framebuffer_init already */
8220
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
8826
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
8221
			return -EINVAL;
8827
			return -EINVAL;
8222
	case DRM_FORMAT_XRGB8888:
8828
	case DRM_FORMAT_XRGB8888:
8223
	case DRM_FORMAT_ARGB8888:
8829
	case DRM_FORMAT_ARGB8888:
8224
		bpp = 8*3;
8830
		bpp = 8*3;
8225
		break;
8831
		break;
8226
	case DRM_FORMAT_XRGB2101010:
8832
	case DRM_FORMAT_XRGB2101010:
8227
	case DRM_FORMAT_ARGB2101010:
8833
	case DRM_FORMAT_ARGB2101010:
8228
	case DRM_FORMAT_XBGR2101010:
8834
	case DRM_FORMAT_XBGR2101010:
8229
	case DRM_FORMAT_ABGR2101010:
8835
	case DRM_FORMAT_ABGR2101010:
8230
		/* checked in intel_framebuffer_init already */
8836
		/* checked in intel_framebuffer_init already */
8231
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
8837
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
8232
			return -EINVAL;
8838
			return -EINVAL;
8233
		bpp = 10*3;
8839
		bpp = 10*3;
8234
		break;
8840
		break;
8235
	/* TODO: gen4+ supports 16 bpc floating point, too. */
8841
	/* TODO: gen4+ supports 16 bpc floating point, too. */
8236
	default:
8842
	default:
8237
		DRM_DEBUG_KMS("unsupported depth\n");
8843
		DRM_DEBUG_KMS("unsupported depth\n");
8238
		return -EINVAL;
8844
		return -EINVAL;
8239
	}
8845
	}
8240
 
8846
 
8241
	pipe_config->pipe_bpp = bpp;
8847
	pipe_config->pipe_bpp = bpp;
8242
 
8848
 
8243
	/* Clamp display bpp to EDID value */
8849
	/* Clamp display bpp to EDID value */
8244
	list_for_each_entry(connector, &dev->mode_config.connector_list,
8850
	list_for_each_entry(connector, &dev->mode_config.connector_list,
8245
			    base.head) {
8851
			    base.head) {
8246
		if (!connector->new_encoder ||
8852
		if (!connector->new_encoder ||
8247
		    connector->new_encoder->new_crtc != crtc)
8853
		    connector->new_encoder->new_crtc != crtc)
8248
			continue;
8854
			continue;
8249
 
8855
 
8250
		connected_sink_compute_bpp(connector, pipe_config);
8856
		connected_sink_compute_bpp(connector, pipe_config);
8251
	}
8857
	}
8252
 
8858
 
8253
	return bpp;
8859
	return bpp;
8254
}
8860
}
-
 
8861
 
-
 
8862
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
-
 
8863
{
-
 
8864
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
-
 
8865
			"type: 0x%x flags: 0x%x\n",
-
 
8866
		mode->crtc_clock,
-
 
8867
		mode->crtc_hdisplay, mode->crtc_hsync_start,
-
 
8868
		mode->crtc_hsync_end, mode->crtc_htotal,
-
 
8869
		mode->crtc_vdisplay, mode->crtc_vsync_start,
-
 
8870
		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
-
 
8871
}
8255
 
8872
 
8256
static void intel_dump_pipe_config(struct intel_crtc *crtc,
8873
static void intel_dump_pipe_config(struct intel_crtc *crtc,
8257
				   struct intel_crtc_config *pipe_config,
8874
				   struct intel_crtc_config *pipe_config,
8258
				   const char *context)
8875
				   const char *context)
8259
{
8876
{
8260
	DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
8877
	DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
8261
		      context, pipe_name(crtc->pipe));
8878
		      context, pipe_name(crtc->pipe));
8262
 
8879
 
8263
	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
8880
	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
8264
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
8881
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
8265
		      pipe_config->pipe_bpp, pipe_config->dither);
8882
		      pipe_config->pipe_bpp, pipe_config->dither);
8266
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
8883
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
8267
		      pipe_config->has_pch_encoder,
8884
		      pipe_config->has_pch_encoder,
8268
		      pipe_config->fdi_lanes,
8885
		      pipe_config->fdi_lanes,
8269
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
8886
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
8270
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
8887
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
8271
		      pipe_config->fdi_m_n.tu);
8888
		      pipe_config->fdi_m_n.tu);
-
 
8889
	DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
-
 
8890
		      pipe_config->has_dp_encoder,
-
 
8891
		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
-
 
8892
		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
-
 
8893
		      pipe_config->dp_m_n.tu);
8272
	DRM_DEBUG_KMS("requested mode:\n");
8894
	DRM_DEBUG_KMS("requested mode:\n");
8273
	drm_mode_debug_printmodeline(&pipe_config->requested_mode);
8895
	drm_mode_debug_printmodeline(&pipe_config->requested_mode);
8274
	DRM_DEBUG_KMS("adjusted mode:\n");
8896
	DRM_DEBUG_KMS("adjusted mode:\n");
8275
	drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
8897
	drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
-
 
8898
	intel_dump_crtc_timings(&pipe_config->adjusted_mode);
-
 
8899
	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
-
 
8900
	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
-
 
8901
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
8276
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
8902
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
8277
		      pipe_config->gmch_pfit.control,
8903
		      pipe_config->gmch_pfit.control,
8278
		      pipe_config->gmch_pfit.pgm_ratios,
8904
		      pipe_config->gmch_pfit.pgm_ratios,
8279
		      pipe_config->gmch_pfit.lvds_border_bits);
8905
		      pipe_config->gmch_pfit.lvds_border_bits);
8280
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
8906
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
8281
		      pipe_config->pch_pfit.pos,
8907
		      pipe_config->pch_pfit.pos,
8282
		      pipe_config->pch_pfit.size,
8908
		      pipe_config->pch_pfit.size,
8283
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
8909
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
8284
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
8910
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
-
 
8911
	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
8285
}
8912
}
8286
 
8913
 
8287
static bool check_encoder_cloning(struct drm_crtc *crtc)
8914
static bool check_encoder_cloning(struct drm_crtc *crtc)
8288
{
8915
{
8289
	int num_encoders = 0;
8916
	int num_encoders = 0;
8290
	bool uncloneable_encoders = false;
8917
	bool uncloneable_encoders = false;
8291
	struct intel_encoder *encoder;
8918
	struct intel_encoder *encoder;
8292
 
8919
 
8293
	list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list,
8920
	list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list,
8294
			    base.head) {
8921
			    base.head) {
8295
		if (&encoder->new_crtc->base != crtc)
8922
		if (&encoder->new_crtc->base != crtc)
8296
			continue;
8923
			continue;
8297
 
8924
 
8298
		num_encoders++;
8925
		num_encoders++;
8299
		if (!encoder->cloneable)
8926
		if (!encoder->cloneable)
8300
			uncloneable_encoders = true;
8927
			uncloneable_encoders = true;
8301
	}
8928
	}
8302
 
8929
 
8303
	return !(num_encoders > 1 && uncloneable_encoders);
8930
	return !(num_encoders > 1 && uncloneable_encoders);
8304
}
8931
}
8305
 
8932
 
8306
static struct intel_crtc_config *
8933
static struct intel_crtc_config *
8307
intel_modeset_pipe_config(struct drm_crtc *crtc,
8934
intel_modeset_pipe_config(struct drm_crtc *crtc,
8308
			  struct drm_framebuffer *fb,
8935
			  struct drm_framebuffer *fb,
8309
			    struct drm_display_mode *mode)
8936
			    struct drm_display_mode *mode)
8310
{
8937
{
8311
	struct drm_device *dev = crtc->dev;
8938
	struct drm_device *dev = crtc->dev;
8312
	struct intel_encoder *encoder;
8939
	struct intel_encoder *encoder;
8313
	struct intel_crtc_config *pipe_config;
8940
	struct intel_crtc_config *pipe_config;
8314
	int plane_bpp, ret = -EINVAL;
8941
	int plane_bpp, ret = -EINVAL;
8315
	bool retry = true;
8942
	bool retry = true;
8316
 
8943
 
8317
	if (!check_encoder_cloning(crtc)) {
8944
	if (!check_encoder_cloning(crtc)) {
8318
		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
8945
		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
8319
		return ERR_PTR(-EINVAL);
8946
		return ERR_PTR(-EINVAL);
8320
	}
8947
	}
8321
 
8948
 
8322
	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
8949
	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
8323
	if (!pipe_config)
8950
	if (!pipe_config)
8324
		return ERR_PTR(-ENOMEM);
8951
		return ERR_PTR(-ENOMEM);
8325
 
8952
 
8326
	drm_mode_copy(&pipe_config->adjusted_mode, mode);
8953
	drm_mode_copy(&pipe_config->adjusted_mode, mode);
8327
	drm_mode_copy(&pipe_config->requested_mode, mode);
8954
	drm_mode_copy(&pipe_config->requested_mode, mode);
-
 
8955
 
8328
	pipe_config->cpu_transcoder =
8956
	pipe_config->cpu_transcoder =
8329
		(enum transcoder) to_intel_crtc(crtc)->pipe;
8957
		(enum transcoder) to_intel_crtc(crtc)->pipe;
8330
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
8958
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
8331
 
8959
 
8332
	/*
8960
	/*
8333
	 * Sanitize sync polarity flags based on requested ones. If neither
8961
	 * Sanitize sync polarity flags based on requested ones. If neither
8334
	 * positive or negative polarity is requested, treat this as meaning
8962
	 * positive or negative polarity is requested, treat this as meaning
8335
	 * negative polarity.
8963
	 * negative polarity.
8336
	 */
8964
	 */
8337
	if (!(pipe_config->adjusted_mode.flags &
8965
	if (!(pipe_config->adjusted_mode.flags &
8338
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
8966
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
8339
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
8967
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
8340
 
8968
 
8341
	if (!(pipe_config->adjusted_mode.flags &
8969
	if (!(pipe_config->adjusted_mode.flags &
8342
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
8970
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
8343
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
8971
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
8344
 
8972
 
8345
	/* Compute a starting value for pipe_config->pipe_bpp taking the source
8973
	/* Compute a starting value for pipe_config->pipe_bpp taking the source
8346
	 * plane pixel format and any sink constraints into account. Returns the
8974
	 * plane pixel format and any sink constraints into account. Returns the
8347
	 * source plane bpp so that dithering can be selected on mismatches
8975
	 * source plane bpp so that dithering can be selected on mismatches
8348
	 * after encoders and crtc also have had their say. */
8976
	 * after encoders and crtc also have had their say. */
8349
	plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
8977
	plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
8350
					      fb, pipe_config);
8978
					      fb, pipe_config);
8351
	if (plane_bpp < 0)
8979
	if (plane_bpp < 0)
8352
		goto fail;
8980
		goto fail;
-
 
8981
 
-
 
8982
	/*
-
 
8983
	 * Determine the real pipe dimensions. Note that stereo modes can
-
 
8984
	 * increase the actual pipe size due to the frame doubling and
-
 
8985
	 * insertion of additional space for blanks between the frame. This
-
 
8986
	 * is stored in the crtc timings. We use the requested mode to do this
-
 
8987
	 * computation to clearly distinguish it from the adjusted mode, which
-
 
8988
	 * can be changed by the connectors in the below retry loop.
-
 
8989
	 */
-
 
8990
	drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
-
 
8991
	pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
-
 
8992
	pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;
8353
 
8993
 
8354
encoder_retry:
8994
encoder_retry:
8355
	/* Ensure the port clock defaults are reset when retrying. */
8995
	/* Ensure the port clock defaults are reset when retrying. */
8356
	pipe_config->port_clock = 0;
8996
	pipe_config->port_clock = 0;
8357
	pipe_config->pixel_multiplier = 1;
8997
	pipe_config->pixel_multiplier = 1;
8358
 
8998
 
8359
	/* Fill in default crtc timings, allow encoders to overwrite them. */
8999
	/* Fill in default crtc timings, allow encoders to overwrite them. */
8360
	drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, 0);
9000
	drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);
8361
 
9001
 
8362
	/* Pass our mode to the connectors and the CRTC to give them a chance to
9002
	/* Pass our mode to the connectors and the CRTC to give them a chance to
8363
	 * adjust it according to limitations or connector properties, and also
9003
	 * adjust it according to limitations or connector properties, and also
8364
	 * a chance to reject the mode entirely.
9004
	 * a chance to reject the mode entirely.
8365
	 */
9005
	 */
8366
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9006
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8367
			    base.head) {
9007
			    base.head) {
8368
 
9008
 
8369
		if (&encoder->new_crtc->base != crtc)
9009
		if (&encoder->new_crtc->base != crtc)
8370
			continue;
9010
			continue;
8371
 
9011
 
8372
			if (!(encoder->compute_config(encoder, pipe_config))) {
9012
			if (!(encoder->compute_config(encoder, pipe_config))) {
8373
				DRM_DEBUG_KMS("Encoder config failure\n");
9013
				DRM_DEBUG_KMS("Encoder config failure\n");
8374
				goto fail;
9014
				goto fail;
8375
			}
9015
			}
8376
		}
9016
		}
8377
 
9017
 
8378
	/* Set default port clock if not overwritten by the encoder. Needs to be
9018
	/* Set default port clock if not overwritten by the encoder. Needs to be
8379
	 * done afterwards in case the encoder adjusts the mode. */
9019
	 * done afterwards in case the encoder adjusts the mode. */
8380
	if (!pipe_config->port_clock)
9020
	if (!pipe_config->port_clock)
8381
		pipe_config->port_clock = pipe_config->adjusted_mode.clock;
9021
		pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
-
 
9022
			* pipe_config->pixel_multiplier;
8382
 
9023
 
8383
	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
9024
	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
8384
	if (ret < 0) {
9025
	if (ret < 0) {
8385
		DRM_DEBUG_KMS("CRTC fixup failed\n");
9026
		DRM_DEBUG_KMS("CRTC fixup failed\n");
8386
		goto fail;
9027
		goto fail;
8387
	}
9028
	}
8388
 
9029
 
8389
	if (ret == RETRY) {
9030
	if (ret == RETRY) {
8390
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
9031
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
8391
			ret = -EINVAL;
9032
			ret = -EINVAL;
8392
			goto fail;
9033
			goto fail;
8393
		}
9034
		}
8394
 
9035
 
8395
		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
9036
		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
8396
		retry = false;
9037
		retry = false;
8397
		goto encoder_retry;
9038
		goto encoder_retry;
8398
	}
9039
	}
8399
 
9040
 
8400
	pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
9041
	pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
8401
	DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
9042
	DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
8402
		      plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);
9043
		      plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);
8403
 
9044
 
8404
	return pipe_config;
9045
	return pipe_config;
8405
fail:
9046
fail:
8406
	kfree(pipe_config);
9047
	kfree(pipe_config);
8407
	return ERR_PTR(ret);
9048
	return ERR_PTR(ret);
8408
}
9049
}
8409
 
9050
 
8410
/* Computes which crtcs are affected and sets the relevant bits in the mask. For
9051
/* Computes which crtcs are affected and sets the relevant bits in the mask. For
8411
 * simplicity we use the crtc's pipe number (because it's easier to obtain). */
9052
 * simplicity we use the crtc's pipe number (because it's easier to obtain). */
8412
static void
9053
static void
8413
intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
9054
intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
8414
			     unsigned *prepare_pipes, unsigned *disable_pipes)
9055
			     unsigned *prepare_pipes, unsigned *disable_pipes)
8415
{
9056
{
8416
	struct intel_crtc *intel_crtc;
9057
	struct intel_crtc *intel_crtc;
8417
	struct drm_device *dev = crtc->dev;
9058
	struct drm_device *dev = crtc->dev;
8418
	struct intel_encoder *encoder;
9059
	struct intel_encoder *encoder;
8419
	struct intel_connector *connector;
9060
	struct intel_connector *connector;
8420
	struct drm_crtc *tmp_crtc;
9061
	struct drm_crtc *tmp_crtc;
8421
 
9062
 
8422
	*disable_pipes = *modeset_pipes = *prepare_pipes = 0;
9063
	*disable_pipes = *modeset_pipes = *prepare_pipes = 0;
8423
 
9064
 
8424
	/* Check which crtcs have changed outputs connected to them, these need
9065
	/* Check which crtcs have changed outputs connected to them, these need
8425
	 * to be part of the prepare_pipes mask. We don't (yet) support global
9066
	 * to be part of the prepare_pipes mask. We don't (yet) support global
8426
	 * modeset across multiple crtcs, so modeset_pipes will only have one
9067
	 * modeset across multiple crtcs, so modeset_pipes will only have one
8427
	 * bit set at most. */
9068
	 * bit set at most. */
8428
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9069
	list_for_each_entry(connector, &dev->mode_config.connector_list,
8429
			    base.head) {
9070
			    base.head) {
8430
		if (connector->base.encoder == &connector->new_encoder->base)
9071
		if (connector->base.encoder == &connector->new_encoder->base)
8431
			continue;
9072
			continue;
8432
 
9073
 
8433
		if (connector->base.encoder) {
9074
		if (connector->base.encoder) {
8434
			tmp_crtc = connector->base.encoder->crtc;
9075
			tmp_crtc = connector->base.encoder->crtc;
8435
 
9076
 
8436
			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
9077
			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
8437
		}
9078
		}
8438
 
9079
 
8439
		if (connector->new_encoder)
9080
		if (connector->new_encoder)
8440
			*prepare_pipes |=
9081
			*prepare_pipes |=
8441
				1 << connector->new_encoder->new_crtc->pipe;
9082
				1 << connector->new_encoder->new_crtc->pipe;
8442
	}
9083
	}
8443
 
9084
 
8444
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9085
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8445
			    base.head) {
9086
			    base.head) {
8446
		if (encoder->base.crtc == &encoder->new_crtc->base)
9087
		if (encoder->base.crtc == &encoder->new_crtc->base)
8447
			continue;
9088
			continue;
8448
 
9089
 
8449
		if (encoder->base.crtc) {
9090
		if (encoder->base.crtc) {
8450
			tmp_crtc = encoder->base.crtc;
9091
			tmp_crtc = encoder->base.crtc;
8451
 
9092
 
8452
			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
9093
			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
8453
		}
9094
		}
8454
 
9095
 
8455
		if (encoder->new_crtc)
9096
		if (encoder->new_crtc)
8456
			*prepare_pipes |= 1 << encoder->new_crtc->pipe;
9097
			*prepare_pipes |= 1 << encoder->new_crtc->pipe;
8457
	}
9098
	}
8458
 
9099
 
8459
	/* Check for any pipes that will be fully disabled ... */
9100
	/* Check for any pipes that will be fully disabled ... */
8460
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
9101
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
8461
			    base.head) {
9102
			    base.head) {
8462
		bool used = false;
9103
		bool used = false;
8463
 
9104
 
8464
		/* Don't try to disable disabled crtcs. */
9105
		/* Don't try to disable disabled crtcs. */
8465
		if (!intel_crtc->base.enabled)
9106
		if (!intel_crtc->base.enabled)
8466
			continue;
9107
			continue;
8467
 
9108
 
8468
		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9109
		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8469
				    base.head) {
9110
				    base.head) {
8470
			if (encoder->new_crtc == intel_crtc)
9111
			if (encoder->new_crtc == intel_crtc)
8471
				used = true;
9112
				used = true;
8472
		}
9113
		}
8473
 
9114
 
8474
		if (!used)
9115
		if (!used)
8475
			*disable_pipes |= 1 << intel_crtc->pipe;
9116
			*disable_pipes |= 1 << intel_crtc->pipe;
8476
	}
9117
	}
8477
 
9118
 
8478
 
9119
 
8479
	/* set_mode is also used to update properties on life display pipes. */
9120
	/* set_mode is also used to update properties on life display pipes. */
8480
	intel_crtc = to_intel_crtc(crtc);
9121
	intel_crtc = to_intel_crtc(crtc);
8481
	if (crtc->enabled)
9122
	if (crtc->enabled)
8482
		*prepare_pipes |= 1 << intel_crtc->pipe;
9123
		*prepare_pipes |= 1 << intel_crtc->pipe;
8483
 
9124
 
8484
	/*
9125
	/*
8485
	 * For simplicity do a full modeset on any pipe where the output routing
9126
	 * For simplicity do a full modeset on any pipe where the output routing
8486
	 * changed. We could be more clever, but that would require us to be
9127
	 * changed. We could be more clever, but that would require us to be
8487
	 * more careful with calling the relevant encoder->mode_set functions.
9128
	 * more careful with calling the relevant encoder->mode_set functions.
8488
	 */
9129
	 */
8489
	if (*prepare_pipes)
9130
	if (*prepare_pipes)
8490
		*modeset_pipes = *prepare_pipes;
9131
		*modeset_pipes = *prepare_pipes;
8491
 
9132
 
8492
	/* ... and mask these out. */
9133
	/* ... and mask these out. */
8493
	*modeset_pipes &= ~(*disable_pipes);
9134
	*modeset_pipes &= ~(*disable_pipes);
8494
	*prepare_pipes &= ~(*disable_pipes);
9135
	*prepare_pipes &= ~(*disable_pipes);
8495
 
9136
 
8496
	/*
9137
	/*
8497
	 * HACK: We don't (yet) fully support global modesets. intel_set_config
9138
	 * HACK: We don't (yet) fully support global modesets. intel_set_config
8498
	 * obies this rule, but the modeset restore mode of
9139
	 * obies this rule, but the modeset restore mode of
8499
	 * intel_modeset_setup_hw_state does not.
9140
	 * intel_modeset_setup_hw_state does not.
8500
	 */
9141
	 */
8501
	*modeset_pipes &= 1 << intel_crtc->pipe;
9142
	*modeset_pipes &= 1 << intel_crtc->pipe;
8502
	*prepare_pipes &= 1 << intel_crtc->pipe;
9143
	*prepare_pipes &= 1 << intel_crtc->pipe;
8503
 
9144
 
8504
	DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
9145
	DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
8505
		      *modeset_pipes, *prepare_pipes, *disable_pipes);
9146
		      *modeset_pipes, *prepare_pipes, *disable_pipes);
8506
}
9147
}
8507
 
9148
 
8508
static bool intel_crtc_in_use(struct drm_crtc *crtc)
9149
static bool intel_crtc_in_use(struct drm_crtc *crtc)
8509
{
9150
{
8510
	struct drm_encoder *encoder;
9151
	struct drm_encoder *encoder;
8511
	struct drm_device *dev = crtc->dev;
9152
	struct drm_device *dev = crtc->dev;
8512
 
9153
 
8513
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
9154
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
8514
		if (encoder->crtc == crtc)
9155
		if (encoder->crtc == crtc)
8515
			return true;
9156
			return true;
8516
 
9157
 
8517
	return false;
9158
	return false;
8518
}
9159
}
8519
 
9160
 
8520
static void
9161
static void
8521
intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
9162
intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
8522
{
9163
{
8523
	struct intel_encoder *intel_encoder;
9164
	struct intel_encoder *intel_encoder;
8524
	struct intel_crtc *intel_crtc;
9165
	struct intel_crtc *intel_crtc;
8525
	struct drm_connector *connector;
9166
	struct drm_connector *connector;
8526
 
9167
 
8527
	list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
9168
	list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
8528
			    base.head) {
9169
			    base.head) {
8529
		if (!intel_encoder->base.crtc)
9170
		if (!intel_encoder->base.crtc)
8530
			continue;
9171
			continue;
8531
 
9172
 
8532
		intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
9173
		intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
8533
 
9174
 
8534
		if (prepare_pipes & (1 << intel_crtc->pipe))
9175
		if (prepare_pipes & (1 << intel_crtc->pipe))
8535
			intel_encoder->connectors_active = false;
9176
			intel_encoder->connectors_active = false;
8536
	}
9177
	}
8537
 
9178
 
8538
	intel_modeset_commit_output_state(dev);
9179
	intel_modeset_commit_output_state(dev);
8539
 
9180
 
8540
	/* Update computed state. */
9181
	/* Update computed state. */
8541
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
9182
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
8542
			    base.head) {
9183
			    base.head) {
8543
		intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
9184
		intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
8544
	}
9185
	}
8545
 
9186
 
8546
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
9187
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
8547
		if (!connector->encoder || !connector->encoder->crtc)
9188
		if (!connector->encoder || !connector->encoder->crtc)
8548
			continue;
9189
			continue;
8549
 
9190
 
8550
		intel_crtc = to_intel_crtc(connector->encoder->crtc);
9191
		intel_crtc = to_intel_crtc(connector->encoder->crtc);
8551
 
9192
 
8552
		if (prepare_pipes & (1 << intel_crtc->pipe)) {
9193
		if (prepare_pipes & (1 << intel_crtc->pipe)) {
8553
			struct drm_property *dpms_property =
9194
			struct drm_property *dpms_property =
8554
				dev->mode_config.dpms_property;
9195
				dev->mode_config.dpms_property;
8555
 
9196
 
8556
			connector->dpms = DRM_MODE_DPMS_ON;
9197
			connector->dpms = DRM_MODE_DPMS_ON;
8557
			drm_object_property_set_value(&connector->base,
9198
			drm_object_property_set_value(&connector->base,
8558
							 dpms_property,
9199
							 dpms_property,
8559
							 DRM_MODE_DPMS_ON);
9200
							 DRM_MODE_DPMS_ON);
8560
 
9201
 
8561
			intel_encoder = to_intel_encoder(connector->encoder);
9202
			intel_encoder = to_intel_encoder(connector->encoder);
8562
			intel_encoder->connectors_active = true;
9203
			intel_encoder->connectors_active = true;
8563
		}
9204
		}
8564
	}
9205
	}
8565
 
9206
 
8566
}
9207
}
8567
 
9208
 
8568
static bool intel_fuzzy_clock_check(struct intel_crtc_config *cur,
-
 
8569
				    struct intel_crtc_config *new)
9209
static bool intel_fuzzy_clock_check(int clock1, int clock2)
8570
{
9210
{
8571
	int clock1, clock2, diff;
-
 
8572
 
-
 
8573
	clock1 = cur->adjusted_mode.clock;
-
 
8574
	clock2 = new->adjusted_mode.clock;
9211
	int diff;
8575
 
9212
 
8576
	if (clock1 == clock2)
9213
	if (clock1 == clock2)
8577
		return true;
9214
		return true;
8578
 
9215
 
8579
	if (!clock1 || !clock2)
9216
	if (!clock1 || !clock2)
8580
		return false;
9217
		return false;
8581
 
9218
 
8582
	diff = abs(clock1 - clock2);
9219
	diff = abs(clock1 - clock2);
8583
 
9220
 
8584
	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
9221
	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
8585
		return true;
9222
		return true;
8586
 
9223
 
8587
	return false;
9224
	return false;
8588
}
9225
}
8589
 
9226
 
8590
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
9227
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
8591
	list_for_each_entry((intel_crtc), \
9228
	list_for_each_entry((intel_crtc), \
8592
			    &(dev)->mode_config.crtc_list, \
9229
			    &(dev)->mode_config.crtc_list, \
8593
			    base.head) \
9230
			    base.head) \
8594
		if (mask & (1 <<(intel_crtc)->pipe))
9231
		if (mask & (1 <<(intel_crtc)->pipe))
8595
 
9232
 
8596
static bool
9233
static bool
8597
intel_pipe_config_compare(struct drm_device *dev,
9234
intel_pipe_config_compare(struct drm_device *dev,
8598
			  struct intel_crtc_config *current_config,
9235
			  struct intel_crtc_config *current_config,
8599
			  struct intel_crtc_config *pipe_config)
9236
			  struct intel_crtc_config *pipe_config)
8600
{
9237
{
8601
#define PIPE_CONF_CHECK_X(name)	\
9238
#define PIPE_CONF_CHECK_X(name)	\
8602
	if (current_config->name != pipe_config->name) { \
9239
	if (current_config->name != pipe_config->name) { \
8603
		DRM_ERROR("mismatch in " #name " " \
9240
		DRM_ERROR("mismatch in " #name " " \
8604
			  "(expected 0x%08x, found 0x%08x)\n", \
9241
			  "(expected 0x%08x, found 0x%08x)\n", \
8605
			  current_config->name, \
9242
			  current_config->name, \
8606
			  pipe_config->name); \
9243
			  pipe_config->name); \
8607
		return false; \
9244
		return false; \
8608
	}
9245
	}
8609
 
9246
 
8610
#define PIPE_CONF_CHECK_I(name)	\
9247
#define PIPE_CONF_CHECK_I(name)	\
8611
	if (current_config->name != pipe_config->name) { \
9248
	if (current_config->name != pipe_config->name) { \
8612
		DRM_ERROR("mismatch in " #name " " \
9249
		DRM_ERROR("mismatch in " #name " " \
8613
			  "(expected %i, found %i)\n", \
9250
			  "(expected %i, found %i)\n", \
8614
			  current_config->name, \
9251
			  current_config->name, \
8615
			  pipe_config->name); \
9252
			  pipe_config->name); \
8616
		return false; \
9253
		return false; \
8617
	}
9254
	}
8618
 
9255
 
8619
#define PIPE_CONF_CHECK_FLAGS(name, mask)	\
9256
#define PIPE_CONF_CHECK_FLAGS(name, mask)	\
8620
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
9257
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
8621
		DRM_ERROR("mismatch in " #name "(" #mask ") "	   \
9258
		DRM_ERROR("mismatch in " #name "(" #mask ") "	   \
8622
			  "(expected %i, found %i)\n", \
9259
			  "(expected %i, found %i)\n", \
8623
			  current_config->name & (mask), \
9260
			  current_config->name & (mask), \
8624
			  pipe_config->name & (mask)); \
9261
			  pipe_config->name & (mask)); \
8625
		return false; \
9262
		return false; \
8626
	}
9263
	}
-
 
9264
 
-
 
9265
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
-
 
9266
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
-
 
9267
		DRM_ERROR("mismatch in " #name " " \
-
 
9268
			  "(expected %i, found %i)\n", \
-
 
9269
			  current_config->name, \
-
 
9270
			  pipe_config->name); \
-
 
9271
		return false; \
-
 
9272
	}
8627
 
9273
 
8628
#define PIPE_CONF_QUIRK(quirk)	\
9274
#define PIPE_CONF_QUIRK(quirk)	\
8629
	((current_config->quirks | pipe_config->quirks) & (quirk))
9275
	((current_config->quirks | pipe_config->quirks) & (quirk))
8630
 
9276
 
8631
	PIPE_CONF_CHECK_I(cpu_transcoder);
9277
	PIPE_CONF_CHECK_I(cpu_transcoder);
8632
 
9278
 
8633
	PIPE_CONF_CHECK_I(has_pch_encoder);
9279
	PIPE_CONF_CHECK_I(has_pch_encoder);
8634
	PIPE_CONF_CHECK_I(fdi_lanes);
9280
	PIPE_CONF_CHECK_I(fdi_lanes);
8635
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
9281
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
8636
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
9282
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
8637
	PIPE_CONF_CHECK_I(fdi_m_n.link_m);
9283
	PIPE_CONF_CHECK_I(fdi_m_n.link_m);
8638
	PIPE_CONF_CHECK_I(fdi_m_n.link_n);
9284
	PIPE_CONF_CHECK_I(fdi_m_n.link_n);
8639
	PIPE_CONF_CHECK_I(fdi_m_n.tu);
9285
	PIPE_CONF_CHECK_I(fdi_m_n.tu);
-
 
9286
 
-
 
9287
	PIPE_CONF_CHECK_I(has_dp_encoder);
-
 
9288
	PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
-
 
9289
	PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
-
 
9290
	PIPE_CONF_CHECK_I(dp_m_n.link_m);
-
 
9291
	PIPE_CONF_CHECK_I(dp_m_n.link_n);
-
 
9292
	PIPE_CONF_CHECK_I(dp_m_n.tu);
8640
 
9293
 
8641
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
9294
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
8642
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
9295
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
8643
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
9296
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
8644
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_end);
9297
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_end);
8645
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_start);
9298
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_start);
8646
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_end);
9299
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_end);
8647
 
9300
 
8648
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vdisplay);
9301
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vdisplay);
8649
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vtotal);
9302
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vtotal);
8650
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_start);
9303
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_start);
8651
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_end);
9304
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_end);
8652
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
9305
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
8653
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
9306
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
8654
 
9307
 
8655
		PIPE_CONF_CHECK_I(pixel_multiplier);
9308
		PIPE_CONF_CHECK_I(pixel_multiplier);
8656
 
9309
 
8657
	PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
9310
	PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
8658
			      DRM_MODE_FLAG_INTERLACE);
9311
			      DRM_MODE_FLAG_INTERLACE);
8659
 
9312
 
8660
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
9313
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
8661
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
9314
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
8662
				      DRM_MODE_FLAG_PHSYNC);
9315
				      DRM_MODE_FLAG_PHSYNC);
8663
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
9316
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
8664
				      DRM_MODE_FLAG_NHSYNC);
9317
				      DRM_MODE_FLAG_NHSYNC);
8665
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
9318
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
8666
				      DRM_MODE_FLAG_PVSYNC);
9319
				      DRM_MODE_FLAG_PVSYNC);
8667
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
9320
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
8668
				      DRM_MODE_FLAG_NVSYNC);
9321
				      DRM_MODE_FLAG_NVSYNC);
8669
	}
9322
	}
8670
 
9323
 
8671
	PIPE_CONF_CHECK_I(requested_mode.hdisplay);
9324
	PIPE_CONF_CHECK_I(pipe_src_w);
8672
	PIPE_CONF_CHECK_I(requested_mode.vdisplay);
9325
	PIPE_CONF_CHECK_I(pipe_src_h);
8673
 
9326
 
8674
	PIPE_CONF_CHECK_I(gmch_pfit.control);
9327
	PIPE_CONF_CHECK_I(gmch_pfit.control);
8675
	/* pfit ratios are autocomputed by the hw on gen4+ */
9328
	/* pfit ratios are autocomputed by the hw on gen4+ */
8676
	if (INTEL_INFO(dev)->gen < 4)
9329
	if (INTEL_INFO(dev)->gen < 4)
8677
		PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
9330
		PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
8678
	PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
9331
	PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
8679
	PIPE_CONF_CHECK_I(pch_pfit.enabled);
9332
	PIPE_CONF_CHECK_I(pch_pfit.enabled);
8680
	if (current_config->pch_pfit.enabled) {
9333
	if (current_config->pch_pfit.enabled) {
8681
	PIPE_CONF_CHECK_I(pch_pfit.pos);
9334
	PIPE_CONF_CHECK_I(pch_pfit.pos);
8682
	PIPE_CONF_CHECK_I(pch_pfit.size);
9335
	PIPE_CONF_CHECK_I(pch_pfit.size);
8683
	}
9336
	}
-
 
9337
 
-
 
9338
	/* BDW+ don't expose a synchronous way to read the state */
8684
 
9339
	if (IS_HASWELL(dev))
-
 
9340
	PIPE_CONF_CHECK_I(ips_enabled);
-
 
9341
 
8685
	PIPE_CONF_CHECK_I(ips_enabled);
9342
	PIPE_CONF_CHECK_I(double_wide);
8686
 
9343
 
8687
	PIPE_CONF_CHECK_I(shared_dpll);
9344
	PIPE_CONF_CHECK_I(shared_dpll);
8688
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
9345
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
8689
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
9346
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
8690
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
9347
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
8691
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
9348
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
8692
 
9349
 
8693
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
9350
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
8694
		PIPE_CONF_CHECK_I(pipe_bpp);
9351
		PIPE_CONF_CHECK_I(pipe_bpp);
-
 
9352
 
-
 
9353
	if (!HAS_DDI(dev)) {
-
 
9354
		PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
-
 
9355
		PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
-
 
9356
	}
8695
 
9357
 
8696
#undef PIPE_CONF_CHECK_X
9358
#undef PIPE_CONF_CHECK_X
8697
#undef PIPE_CONF_CHECK_I
9359
#undef PIPE_CONF_CHECK_I
8698
#undef PIPE_CONF_CHECK_FLAGS
9360
#undef PIPE_CONF_CHECK_FLAGS
-
 
9361
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
8699
#undef PIPE_CONF_QUIRK
9362
#undef PIPE_CONF_QUIRK
8700
 
-
 
8701
	if (!IS_HASWELL(dev)) {
-
 
8702
		if (!intel_fuzzy_clock_check(current_config, pipe_config)) {
-
 
8703
			DRM_ERROR("mismatch in clock (expected %d, found %d)\n",
-
 
8704
				  current_config->adjusted_mode.clock,
-
 
8705
				  pipe_config->adjusted_mode.clock);
-
 
8706
			return false;
-
 
8707
		}
-
 
8708
	}
-
 
8709
 
9363
 
8710
	return true;
9364
	return true;
8711
}
9365
}
8712
 
9366
 
8713
static void
9367
static void
8714
check_connector_state(struct drm_device *dev)
9368
check_connector_state(struct drm_device *dev)
8715
{
9369
{
8716
	struct intel_connector *connector;
9370
	struct intel_connector *connector;
8717
 
9371
 
8718
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9372
	list_for_each_entry(connector, &dev->mode_config.connector_list,
8719
			    base.head) {
9373
			    base.head) {
8720
		/* This also checks the encoder/connector hw state with the
9374
		/* This also checks the encoder/connector hw state with the
8721
		 * ->get_hw_state callbacks. */
9375
		 * ->get_hw_state callbacks. */
8722
		intel_connector_check_state(connector);
9376
		intel_connector_check_state(connector);
8723
 
9377
 
8724
		WARN(&connector->new_encoder->base != connector->base.encoder,
9378
		WARN(&connector->new_encoder->base != connector->base.encoder,
8725
		     "connector's staged encoder doesn't match current encoder\n");
9379
		     "connector's staged encoder doesn't match current encoder\n");
8726
	}
9380
	}
8727
}
9381
}
8728
 
9382
 
8729
static void
9383
static void
8730
check_encoder_state(struct drm_device *dev)
9384
check_encoder_state(struct drm_device *dev)
8731
{
9385
{
8732
	struct intel_encoder *encoder;
9386
	struct intel_encoder *encoder;
8733
	struct intel_connector *connector;
9387
	struct intel_connector *connector;
8734
 
9388
 
8735
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9389
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8736
			    base.head) {
9390
			    base.head) {
8737
		bool enabled = false;
9391
		bool enabled = false;
8738
		bool active = false;
9392
		bool active = false;
8739
		enum pipe pipe, tracked_pipe;
9393
		enum pipe pipe, tracked_pipe;
8740
 
9394
 
8741
		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
9395
		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
8742
			      encoder->base.base.id,
9396
			      encoder->base.base.id,
8743
			      drm_get_encoder_name(&encoder->base));
9397
			      drm_get_encoder_name(&encoder->base));
8744
 
9398
 
8745
		WARN(&encoder->new_crtc->base != encoder->base.crtc,
9399
		WARN(&encoder->new_crtc->base != encoder->base.crtc,
8746
		     "encoder's stage crtc doesn't match current crtc\n");
9400
		     "encoder's stage crtc doesn't match current crtc\n");
8747
		WARN(encoder->connectors_active && !encoder->base.crtc,
9401
		WARN(encoder->connectors_active && !encoder->base.crtc,
8748
		     "encoder's active_connectors set, but no crtc\n");
9402
		     "encoder's active_connectors set, but no crtc\n");
8749
 
9403
 
8750
		list_for_each_entry(connector, &dev->mode_config.connector_list,
9404
		list_for_each_entry(connector, &dev->mode_config.connector_list,
8751
				    base.head) {
9405
				    base.head) {
8752
			if (connector->base.encoder != &encoder->base)
9406
			if (connector->base.encoder != &encoder->base)
8753
				continue;
9407
				continue;
8754
			enabled = true;
9408
			enabled = true;
8755
			if (connector->base.dpms != DRM_MODE_DPMS_OFF)
9409
			if (connector->base.dpms != DRM_MODE_DPMS_OFF)
8756
				active = true;
9410
				active = true;
8757
		}
9411
		}
8758
		WARN(!!encoder->base.crtc != enabled,
9412
		WARN(!!encoder->base.crtc != enabled,
8759
		     "encoder's enabled state mismatch "
9413
		     "encoder's enabled state mismatch "
8760
		     "(expected %i, found %i)\n",
9414
		     "(expected %i, found %i)\n",
8761
		     !!encoder->base.crtc, enabled);
9415
		     !!encoder->base.crtc, enabled);
8762
		WARN(active && !encoder->base.crtc,
9416
		WARN(active && !encoder->base.crtc,
8763
		     "active encoder with no crtc\n");
9417
		     "active encoder with no crtc\n");
8764
 
9418
 
8765
		WARN(encoder->connectors_active != active,
9419
		WARN(encoder->connectors_active != active,
8766
		     "encoder's computed active state doesn't match tracked active state "
9420
		     "encoder's computed active state doesn't match tracked active state "
8767
		     "(expected %i, found %i)\n", active, encoder->connectors_active);
9421
		     "(expected %i, found %i)\n", active, encoder->connectors_active);
8768
 
9422
 
8769
		active = encoder->get_hw_state(encoder, &pipe);
9423
		active = encoder->get_hw_state(encoder, &pipe);
8770
		WARN(active != encoder->connectors_active,
9424
		WARN(active != encoder->connectors_active,
8771
		     "encoder's hw state doesn't match sw tracking "
9425
		     "encoder's hw state doesn't match sw tracking "
8772
		     "(expected %i, found %i)\n",
9426
		     "(expected %i, found %i)\n",
8773
		     encoder->connectors_active, active);
9427
		     encoder->connectors_active, active);
8774
 
9428
 
8775
		if (!encoder->base.crtc)
9429
		if (!encoder->base.crtc)
8776
			continue;
9430
			continue;
8777
 
9431
 
8778
		tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
9432
		tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
8779
		WARN(active && pipe != tracked_pipe,
9433
		WARN(active && pipe != tracked_pipe,
8780
		     "active encoder's pipe doesn't match"
9434
		     "active encoder's pipe doesn't match"
8781
		     "(expected %i, found %i)\n",
9435
		     "(expected %i, found %i)\n",
8782
		     tracked_pipe, pipe);
9436
		     tracked_pipe, pipe);
8783
 
9437
 
8784
	}
9438
	}
8785
}
9439
}
8786
 
9440
 
8787
static void
9441
static void
8788
check_crtc_state(struct drm_device *dev)
9442
check_crtc_state(struct drm_device *dev)
8789
{
9443
{
8790
	drm_i915_private_t *dev_priv = dev->dev_private;
9444
	drm_i915_private_t *dev_priv = dev->dev_private;
8791
	struct intel_crtc *crtc;
9445
	struct intel_crtc *crtc;
8792
	struct intel_encoder *encoder;
9446
	struct intel_encoder *encoder;
8793
	struct intel_crtc_config pipe_config;
9447
	struct intel_crtc_config pipe_config;
8794
 
9448
 
8795
	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
9449
	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
8796
			    base.head) {
9450
			    base.head) {
8797
		bool enabled = false;
9451
		bool enabled = false;
8798
		bool active = false;
9452
		bool active = false;
8799
 
9453
 
8800
		memset(&pipe_config, 0, sizeof(pipe_config));
9454
		memset(&pipe_config, 0, sizeof(pipe_config));
8801
 
9455
 
8802
		DRM_DEBUG_KMS("[CRTC:%d]\n",
9456
		DRM_DEBUG_KMS("[CRTC:%d]\n",
8803
			      crtc->base.base.id);
9457
			      crtc->base.base.id);
8804
 
9458
 
8805
		WARN(crtc->active && !crtc->base.enabled,
9459
		WARN(crtc->active && !crtc->base.enabled,
8806
		     "active crtc, but not enabled in sw tracking\n");
9460
		     "active crtc, but not enabled in sw tracking\n");
8807
 
9461
 
8808
		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9462
		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8809
				    base.head) {
9463
				    base.head) {
8810
			if (encoder->base.crtc != &crtc->base)
9464
			if (encoder->base.crtc != &crtc->base)
8811
				continue;
9465
				continue;
8812
			enabled = true;
9466
			enabled = true;
8813
			if (encoder->connectors_active)
9467
			if (encoder->connectors_active)
8814
				active = true;
9468
				active = true;
8815
		}
9469
		}
8816
 
9470
 
8817
		WARN(active != crtc->active,
9471
		WARN(active != crtc->active,
8818
		     "crtc's computed active state doesn't match tracked active state "
9472
		     "crtc's computed active state doesn't match tracked active state "
8819
		     "(expected %i, found %i)\n", active, crtc->active);
9473
		     "(expected %i, found %i)\n", active, crtc->active);
8820
		WARN(enabled != crtc->base.enabled,
9474
		WARN(enabled != crtc->base.enabled,
8821
		     "crtc's computed enabled state doesn't match tracked enabled state "
9475
		     "crtc's computed enabled state doesn't match tracked enabled state "
8822
		     "(expected %i, found %i)\n", enabled, crtc->base.enabled);
9476
		     "(expected %i, found %i)\n", enabled, crtc->base.enabled);
8823
 
9477
 
8824
		active = dev_priv->display.get_pipe_config(crtc,
9478
		active = dev_priv->display.get_pipe_config(crtc,
8825
							   &pipe_config);
9479
							   &pipe_config);
8826
 
9480
 
8827
		/* hw state is inconsistent with the pipe A quirk */
9481
		/* hw state is inconsistent with the pipe A quirk */
8828
		if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
9482
		if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
8829
			active = crtc->active;
9483
			active = crtc->active;
8830
 
9484
 
8831
		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9485
		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8832
				    base.head) {
9486
				    base.head) {
8833
			enum pipe pipe;
9487
			enum pipe pipe;
8834
			if (encoder->base.crtc != &crtc->base)
9488
			if (encoder->base.crtc != &crtc->base)
8835
				continue;
9489
				continue;
8836
			if (encoder->get_config &&
-
 
8837
			    encoder->get_hw_state(encoder, &pipe))
9490
			if (encoder->get_hw_state(encoder, &pipe))
8838
				encoder->get_config(encoder, &pipe_config);
9491
				encoder->get_config(encoder, &pipe_config);
8839
		}
9492
		}
8840
 
-
 
8841
		if (dev_priv->display.get_clock)
-
 
8842
			dev_priv->display.get_clock(crtc, &pipe_config);
-
 
8843
 
9493
 
8844
		WARN(crtc->active != active,
9494
		WARN(crtc->active != active,
8845
		     "crtc active state doesn't match with hw state "
9495
		     "crtc active state doesn't match with hw state "
8846
		     "(expected %i, found %i)\n", crtc->active, active);
9496
		     "(expected %i, found %i)\n", crtc->active, active);
8847
 
9497
 
8848
		if (active &&
9498
		if (active &&
8849
		    !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) {
9499
		    !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) {
8850
			WARN(1, "pipe state doesn't match!\n");
9500
			WARN(1, "pipe state doesn't match!\n");
8851
			intel_dump_pipe_config(crtc, &pipe_config,
9501
			intel_dump_pipe_config(crtc, &pipe_config,
8852
					       "[hw state]");
9502
					       "[hw state]");
8853
			intel_dump_pipe_config(crtc, &crtc->config,
9503
			intel_dump_pipe_config(crtc, &crtc->config,
8854
					       "[sw state]");
9504
					       "[sw state]");
8855
		}
9505
		}
8856
	}
9506
	}
8857
}
9507
}
8858
 
9508
 
8859
static void
9509
static void
8860
check_shared_dpll_state(struct drm_device *dev)
9510
check_shared_dpll_state(struct drm_device *dev)
8861
{
9511
{
8862
	drm_i915_private_t *dev_priv = dev->dev_private;
9512
	drm_i915_private_t *dev_priv = dev->dev_private;
8863
	struct intel_crtc *crtc;
9513
	struct intel_crtc *crtc;
8864
	struct intel_dpll_hw_state dpll_hw_state;
9514
	struct intel_dpll_hw_state dpll_hw_state;
8865
	int i;
9515
	int i;
8866
 
9516
 
8867
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
9517
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
8868
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
9518
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
8869
		int enabled_crtcs = 0, active_crtcs = 0;
9519
		int enabled_crtcs = 0, active_crtcs = 0;
8870
		bool active;
9520
		bool active;
8871
 
9521
 
8872
		memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
9522
		memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
8873
 
9523
 
8874
		DRM_DEBUG_KMS("%s\n", pll->name);
9524
		DRM_DEBUG_KMS("%s\n", pll->name);
8875
 
9525
 
8876
		active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
9526
		active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
8877
 
9527
 
8878
		WARN(pll->active > pll->refcount,
9528
		WARN(pll->active > pll->refcount,
8879
		     "more active pll users than references: %i vs %i\n",
9529
		     "more active pll users than references: %i vs %i\n",
8880
		     pll->active, pll->refcount);
9530
		     pll->active, pll->refcount);
8881
		WARN(pll->active && !pll->on,
9531
		WARN(pll->active && !pll->on,
8882
		     "pll in active use but not on in sw tracking\n");
9532
		     "pll in active use but not on in sw tracking\n");
8883
		WARN(pll->on && !pll->active,
9533
		WARN(pll->on && !pll->active,
8884
		     "pll in on but not on in use in sw tracking\n");
9534
		     "pll in on but not on in use in sw tracking\n");
8885
		WARN(pll->on != active,
9535
		WARN(pll->on != active,
8886
		     "pll on state mismatch (expected %i, found %i)\n",
9536
		     "pll on state mismatch (expected %i, found %i)\n",
8887
		     pll->on, active);
9537
		     pll->on, active);
8888
 
9538
 
8889
		list_for_each_entry(crtc, &dev->mode_config.crtc_list,
9539
		list_for_each_entry(crtc, &dev->mode_config.crtc_list,
8890
				    base.head) {
9540
				    base.head) {
8891
			if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
9541
			if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
8892
				enabled_crtcs++;
9542
				enabled_crtcs++;
8893
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
9543
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
8894
				active_crtcs++;
9544
				active_crtcs++;
8895
		}
9545
		}
8896
		WARN(pll->active != active_crtcs,
9546
		WARN(pll->active != active_crtcs,
8897
		     "pll active crtcs mismatch (expected %i, found %i)\n",
9547
		     "pll active crtcs mismatch (expected %i, found %i)\n",
8898
		     pll->active, active_crtcs);
9548
		     pll->active, active_crtcs);
8899
		WARN(pll->refcount != enabled_crtcs,
9549
		WARN(pll->refcount != enabled_crtcs,
8900
		     "pll enabled crtcs mismatch (expected %i, found %i)\n",
9550
		     "pll enabled crtcs mismatch (expected %i, found %i)\n",
8901
		     pll->refcount, enabled_crtcs);
9551
		     pll->refcount, enabled_crtcs);
8902
 
9552
 
8903
		WARN(pll->on && memcmp(&pll->hw_state, &dpll_hw_state,
9553
		WARN(pll->on && memcmp(&pll->hw_state, &dpll_hw_state,
8904
				       sizeof(dpll_hw_state)),
9554
				       sizeof(dpll_hw_state)),
8905
		     "pll hw state mismatch\n");
9555
		     "pll hw state mismatch\n");
8906
	}
9556
	}
8907
}
9557
}
8908
 
9558
 
8909
void
9559
void
8910
intel_modeset_check_state(struct drm_device *dev)
9560
intel_modeset_check_state(struct drm_device *dev)
8911
{
9561
{
8912
	check_connector_state(dev);
9562
	check_connector_state(dev);
8913
	check_encoder_state(dev);
9563
	check_encoder_state(dev);
8914
	check_crtc_state(dev);
9564
	check_crtc_state(dev);
8915
	check_shared_dpll_state(dev);
9565
	check_shared_dpll_state(dev);
8916
}
9566
}
-
 
9567
 
-
 
9568
void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
-
 
9569
				     int dotclock)
-
 
9570
{
-
 
9571
	/*
-
 
9572
	 * FDI already provided one idea for the dotclock.
-
 
9573
	 * Yell if the encoder disagrees.
-
 
9574
	 */
-
 
9575
	WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock),
-
 
9576
	     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
-
 
9577
	     pipe_config->adjusted_mode.crtc_clock, dotclock);
-
 
9578
}
8917
 
9579
 
8918
static int __intel_set_mode(struct drm_crtc *crtc,
9580
static int __intel_set_mode(struct drm_crtc *crtc,
8919
		    struct drm_display_mode *mode,
9581
		    struct drm_display_mode *mode,
8920
		    int x, int y, struct drm_framebuffer *fb)
9582
		    int x, int y, struct drm_framebuffer *fb)
8921
{
9583
{
8922
	struct drm_device *dev = crtc->dev;
9584
	struct drm_device *dev = crtc->dev;
8923
	drm_i915_private_t *dev_priv = dev->dev_private;
9585
	drm_i915_private_t *dev_priv = dev->dev_private;
8924
	struct drm_display_mode *saved_mode, *saved_hwmode;
9586
	struct drm_display_mode *saved_mode;
8925
	struct intel_crtc_config *pipe_config = NULL;
9587
	struct intel_crtc_config *pipe_config = NULL;
8926
	struct intel_crtc *intel_crtc;
9588
	struct intel_crtc *intel_crtc;
8927
	unsigned disable_pipes, prepare_pipes, modeset_pipes;
9589
	unsigned disable_pipes, prepare_pipes, modeset_pipes;
8928
	int ret = 0;
9590
	int ret = 0;
8929
 
9591
 
8930
	saved_mode = kmalloc(2 * sizeof(*saved_mode), GFP_KERNEL);
9592
	saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL);
8931
	if (!saved_mode)
9593
	if (!saved_mode)
8932
		return -ENOMEM;
-
 
8933
	saved_hwmode = saved_mode + 1;
9594
		return -ENOMEM;
8934
 
9595
 
8935
	intel_modeset_affected_pipes(crtc, &modeset_pipes,
9596
	intel_modeset_affected_pipes(crtc, &modeset_pipes,
8936
				     &prepare_pipes, &disable_pipes);
9597
				     &prepare_pipes, &disable_pipes);
8937
 
-
 
8938
	*saved_hwmode = crtc->hwmode;
9598
 
8939
	*saved_mode = crtc->mode;
9599
	*saved_mode = crtc->mode;
8940
 
9600
 
8941
	/* Hack: Because we don't (yet) support global modeset on multiple
9601
	/* Hack: Because we don't (yet) support global modeset on multiple
8942
	 * crtcs, we don't keep track of the new mode for more than one crtc.
9602
	 * crtcs, we don't keep track of the new mode for more than one crtc.
8943
	 * Hence simply check whether any bit is set in modeset_pipes in all the
9603
	 * Hence simply check whether any bit is set in modeset_pipes in all the
8944
	 * pieces of code that are not yet converted to deal with mutliple crtcs
9604
	 * pieces of code that are not yet converted to deal with mutliple crtcs
8945
	 * changing their mode at the same time. */
9605
	 * changing their mode at the same time. */
8946
	if (modeset_pipes) {
9606
	if (modeset_pipes) {
8947
		pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
9607
		pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
8948
		if (IS_ERR(pipe_config)) {
9608
		if (IS_ERR(pipe_config)) {
8949
			ret = PTR_ERR(pipe_config);
9609
			ret = PTR_ERR(pipe_config);
8950
			pipe_config = NULL;
9610
			pipe_config = NULL;
8951
 
9611
 
8952
			goto out;
9612
			goto out;
8953
		}
9613
		}
8954
		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
9614
		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
8955
				       "[modeset]");
9615
				       "[modeset]");
8956
	}
9616
	}
-
 
9617
 
-
 
9618
	/*
-
 
9619
	 * See if the config requires any additional preparation, e.g.
-
 
9620
	 * to adjust global state with pipes off.  We need to do this
-
 
9621
	 * here so we can get the modeset_pipe updated config for the new
-
 
9622
	 * mode set on this crtc.  For other crtcs we need to use the
-
 
9623
	 * adjusted_mode bits in the crtc directly.
-
 
9624
	 */
-
 
9625
	if (IS_VALLEYVIEW(dev)) {
-
 
9626
		valleyview_modeset_global_pipes(dev, &prepare_pipes,
-
 
9627
						modeset_pipes, pipe_config);
-
 
9628
 
-
 
9629
		/* may have added more to prepare_pipes than we should */
-
 
9630
		prepare_pipes &= ~disable_pipes;
-
 
9631
	}
8957
 
9632
 
8958
	for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
9633
	for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
8959
		intel_crtc_disable(&intel_crtc->base);
9634
		intel_crtc_disable(&intel_crtc->base);
8960
 
9635
 
8961
	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
9636
	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
8962
		if (intel_crtc->base.enabled)
9637
		if (intel_crtc->base.enabled)
8963
			dev_priv->display.crtc_disable(&intel_crtc->base);
9638
			dev_priv->display.crtc_disable(&intel_crtc->base);
8964
	}
9639
	}
8965
 
9640
 
8966
	/* crtc->mode is already used by the ->mode_set callbacks, hence we need
9641
	/* crtc->mode is already used by the ->mode_set callbacks, hence we need
8967
	 * to set it here already despite that we pass it down the callchain.
9642
	 * to set it here already despite that we pass it down the callchain.
8968
	 */
9643
	 */
8969
	if (modeset_pipes) {
9644
	if (modeset_pipes) {
8970
		crtc->mode = *mode;
9645
		crtc->mode = *mode;
8971
		/* mode_set/enable/disable functions rely on a correct pipe
9646
		/* mode_set/enable/disable functions rely on a correct pipe
8972
		 * config. */
9647
		 * config. */
8973
		to_intel_crtc(crtc)->config = *pipe_config;
9648
		to_intel_crtc(crtc)->config = *pipe_config;
-
 
9649
 
-
 
9650
		/*
-
 
9651
		 * Calculate and store various constants which
-
 
9652
		 * are later needed by vblank and swap-completion
-
 
9653
		 * timestamping. They are derived from true hwmode.
-
 
9654
		 */
-
 
9655
		drm_calc_timestamping_constants(crtc,
-
 
9656
						&pipe_config->adjusted_mode);
8974
	}
9657
	}
8975
 
9658
 
8976
	/* Only after disabling all output pipelines that will be changed can we
9659
	/* Only after disabling all output pipelines that will be changed can we
8977
	 * update the the output configuration. */
9660
	 * update the the output configuration. */
8978
	intel_modeset_update_state(dev, prepare_pipes);
9661
	intel_modeset_update_state(dev, prepare_pipes);
8979
 
9662
 
8980
	if (dev_priv->display.modeset_global_resources)
9663
	if (dev_priv->display.modeset_global_resources)
8981
		dev_priv->display.modeset_global_resources(dev);
9664
		dev_priv->display.modeset_global_resources(dev);
8982
 
9665
 
8983
	/* Set up the DPLL and any encoders state that needs to adjust or depend
9666
	/* Set up the DPLL and any encoders state that needs to adjust or depend
8984
	 * on the DPLL.
9667
	 * on the DPLL.
8985
	 */
9668
	 */
8986
	for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
9669
	for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
8987
		ret = intel_crtc_mode_set(&intel_crtc->base,
9670
		ret = intel_crtc_mode_set(&intel_crtc->base,
8988
					   x, y, fb);
9671
					   x, y, fb);
8989
		if (ret)
9672
		if (ret)
8990
		    goto done;
9673
		    goto done;
8991
	}
9674
	}
8992
 
9675
 
8993
	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
9676
	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
8994
	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
9677
	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
8995
		dev_priv->display.crtc_enable(&intel_crtc->base);
9678
		dev_priv->display.crtc_enable(&intel_crtc->base);
8996
 
-
 
8997
	if (modeset_pipes) {
-
 
8998
		/* Store real post-adjustment hardware mode. */
-
 
8999
		crtc->hwmode = pipe_config->adjusted_mode;
-
 
9000
 
-
 
9001
		/* Calculate and store various constants which
-
 
9002
		 * are later needed by vblank and swap-completion
-
 
9003
		 * timestamping. They are derived from true hwmode.
-
 
9004
		 */
-
 
9005
		drm_calc_timestamping_constants(crtc);
-
 
9006
	}
-
 
9007
 
9679
 
9008
	/* FIXME: add subpixel order */
9680
	/* FIXME: add subpixel order */
9009
done:
9681
done:
9010
	if (ret && crtc->enabled) {
-
 
9011
		crtc->hwmode = *saved_hwmode;
9682
	if (ret && crtc->enabled)
9012
		crtc->mode = *saved_mode;
-
 
9013
	}
9683
		crtc->mode = *saved_mode;
9014
 
9684
 
9015
out:
9685
out:
9016
	kfree(pipe_config);
9686
	kfree(pipe_config);
9017
	kfree(saved_mode);
9687
	kfree(saved_mode);
9018
	return ret;
9688
	return ret;
9019
}
9689
}
9020
 
9690
 
9021
static int intel_set_mode(struct drm_crtc *crtc,
9691
static int intel_set_mode(struct drm_crtc *crtc,
9022
		     struct drm_display_mode *mode,
9692
		     struct drm_display_mode *mode,
9023
		     int x, int y, struct drm_framebuffer *fb)
9693
		     int x, int y, struct drm_framebuffer *fb)
9024
{
9694
{
9025
	int ret;
9695
	int ret;
9026
 
9696
 
9027
	ret = __intel_set_mode(crtc, mode, x, y, fb);
9697
	ret = __intel_set_mode(crtc, mode, x, y, fb);
9028
 
9698
 
9029
	if (ret == 0)
9699
	if (ret == 0)
9030
		intel_modeset_check_state(crtc->dev);
9700
		intel_modeset_check_state(crtc->dev);
9031
 
9701
 
9032
	return ret;
9702
	return ret;
9033
}
9703
}
9034
 
9704
 
9035
void intel_crtc_restore_mode(struct drm_crtc *crtc)
9705
void intel_crtc_restore_mode(struct drm_crtc *crtc)
9036
{
9706
{
9037
	intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb);
9707
	intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb);
9038
}
9708
}
9039
 
9709
 
9040
#undef for_each_intel_crtc_masked
9710
#undef for_each_intel_crtc_masked
9041
 
9711
 
9042
static void intel_set_config_free(struct intel_set_config *config)
9712
static void intel_set_config_free(struct intel_set_config *config)
9043
{
9713
{
9044
	if (!config)
9714
	if (!config)
9045
		return;
9715
		return;
9046
 
9716
 
9047
	kfree(config->save_connector_encoders);
9717
	kfree(config->save_connector_encoders);
9048
	kfree(config->save_encoder_crtcs);
9718
	kfree(config->save_encoder_crtcs);
9049
	kfree(config);
9719
	kfree(config);
9050
}
9720
}
9051
 
9721
 
9052
static int intel_set_config_save_state(struct drm_device *dev,
9722
static int intel_set_config_save_state(struct drm_device *dev,
9053
				       struct intel_set_config *config)
9723
				       struct intel_set_config *config)
9054
{
9724
{
9055
	struct drm_encoder *encoder;
9725
	struct drm_encoder *encoder;
9056
	struct drm_connector *connector;
9726
	struct drm_connector *connector;
9057
	int count;
9727
	int count;
9058
 
9728
 
9059
	config->save_encoder_crtcs =
9729
	config->save_encoder_crtcs =
9060
		kcalloc(dev->mode_config.num_encoder,
9730
		kcalloc(dev->mode_config.num_encoder,
9061
			sizeof(struct drm_crtc *), GFP_KERNEL);
9731
			sizeof(struct drm_crtc *), GFP_KERNEL);
9062
	if (!config->save_encoder_crtcs)
9732
	if (!config->save_encoder_crtcs)
9063
		return -ENOMEM;
9733
		return -ENOMEM;
9064
 
9734
 
9065
	config->save_connector_encoders =
9735
	config->save_connector_encoders =
9066
		kcalloc(dev->mode_config.num_connector,
9736
		kcalloc(dev->mode_config.num_connector,
9067
			sizeof(struct drm_encoder *), GFP_KERNEL);
9737
			sizeof(struct drm_encoder *), GFP_KERNEL);
9068
	if (!config->save_connector_encoders)
9738
	if (!config->save_connector_encoders)
9069
		return -ENOMEM;
9739
		return -ENOMEM;
9070
 
9740
 
9071
	/* Copy data. Note that driver private data is not affected.
9741
	/* Copy data. Note that driver private data is not affected.
9072
	 * Should anything bad happen only the expected state is
9742
	 * Should anything bad happen only the expected state is
9073
	 * restored, not the drivers personal bookkeeping.
9743
	 * restored, not the drivers personal bookkeeping.
9074
	 */
9744
	 */
9075
	count = 0;
9745
	count = 0;
9076
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
9746
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
9077
		config->save_encoder_crtcs[count++] = encoder->crtc;
9747
		config->save_encoder_crtcs[count++] = encoder->crtc;
9078
	}
9748
	}
9079
 
9749
 
9080
	count = 0;
9750
	count = 0;
9081
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
9751
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
9082
		config->save_connector_encoders[count++] = connector->encoder;
9752
		config->save_connector_encoders[count++] = connector->encoder;
9083
	}
9753
	}
9084
 
9754
 
9085
	return 0;
9755
	return 0;
9086
}
9756
}
9087
 
9757
 
9088
static void intel_set_config_restore_state(struct drm_device *dev,
9758
static void intel_set_config_restore_state(struct drm_device *dev,
9089
					   struct intel_set_config *config)
9759
					   struct intel_set_config *config)
9090
{
9760
{
9091
	struct intel_encoder *encoder;
9761
	struct intel_encoder *encoder;
9092
	struct intel_connector *connector;
9762
	struct intel_connector *connector;
9093
	int count;
9763
	int count;
9094
 
9764
 
9095
	count = 0;
9765
	count = 0;
9096
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
9766
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
9097
		encoder->new_crtc =
9767
		encoder->new_crtc =
9098
			to_intel_crtc(config->save_encoder_crtcs[count++]);
9768
			to_intel_crtc(config->save_encoder_crtcs[count++]);
9099
	}
9769
	}
9100
 
9770
 
9101
	count = 0;
9771
	count = 0;
9102
	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
9772
	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
9103
		connector->new_encoder =
9773
		connector->new_encoder =
9104
			to_intel_encoder(config->save_connector_encoders[count++]);
9774
			to_intel_encoder(config->save_connector_encoders[count++]);
9105
	}
9775
	}
9106
}
9776
}
9107
 
9777
 
9108
static bool
9778
static bool
9109
is_crtc_connector_off(struct drm_mode_set *set)
9779
is_crtc_connector_off(struct drm_mode_set *set)
9110
{
9780
{
9111
	int i;
9781
	int i;
9112
 
9782
 
9113
	if (set->num_connectors == 0)
9783
	if (set->num_connectors == 0)
9114
		return false;
9784
		return false;
9115
 
9785
 
9116
	if (WARN_ON(set->connectors == NULL))
9786
	if (WARN_ON(set->connectors == NULL))
9117
		return false;
9787
		return false;
9118
 
9788
 
9119
	for (i = 0; i < set->num_connectors; i++)
9789
	for (i = 0; i < set->num_connectors; i++)
9120
		if (set->connectors[i]->encoder &&
9790
		if (set->connectors[i]->encoder &&
9121
		    set->connectors[i]->encoder->crtc == set->crtc &&
9791
		    set->connectors[i]->encoder->crtc == set->crtc &&
9122
		    set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
9792
		    set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
9123
			return true;
9793
			return true;
9124
 
9794
 
9125
	return false;
9795
	return false;
9126
}
9796
}
9127
 
9797
 
9128
static void
9798
static void
9129
intel_set_config_compute_mode_changes(struct drm_mode_set *set,
9799
intel_set_config_compute_mode_changes(struct drm_mode_set *set,
9130
				      struct intel_set_config *config)
9800
				      struct intel_set_config *config)
9131
{
9801
{
9132
 
9802
 
9133
	/* We should be able to check here if the fb has the same properties
9803
	/* We should be able to check here if the fb has the same properties
9134
	 * and then just flip_or_move it */
9804
	 * and then just flip_or_move it */
9135
	if (is_crtc_connector_off(set)) {
9805
	if (is_crtc_connector_off(set)) {
9136
			config->mode_changed = true;
9806
			config->mode_changed = true;
9137
	} else if (set->crtc->fb != set->fb) {
9807
	} else if (set->crtc->fb != set->fb) {
9138
		/* If we have no fb then treat it as a full mode set */
9808
		/* If we have no fb then treat it as a full mode set */
9139
		if (set->crtc->fb == NULL) {
9809
		if (set->crtc->fb == NULL) {
9140
			struct intel_crtc *intel_crtc =
9810
			struct intel_crtc *intel_crtc =
9141
				to_intel_crtc(set->crtc);
9811
				to_intel_crtc(set->crtc);
9142
 
9812
 
9143
			if (intel_crtc->active && i915_fastboot) {
9813
			if (intel_crtc->active && i915_fastboot) {
9144
				DRM_DEBUG_KMS("crtc has no fb, will flip\n");
9814
				DRM_DEBUG_KMS("crtc has no fb, will flip\n");
9145
				config->fb_changed = true;
9815
				config->fb_changed = true;
9146
			} else {
9816
			} else {
9147
				DRM_DEBUG_KMS("inactive crtc, full mode set\n");
9817
				DRM_DEBUG_KMS("inactive crtc, full mode set\n");
9148
			config->mode_changed = true;
9818
			config->mode_changed = true;
9149
			}
9819
			}
9150
		} else if (set->fb == NULL) {
9820
		} else if (set->fb == NULL) {
9151
			config->mode_changed = true;
9821
			config->mode_changed = true;
9152
		} else if (set->fb->pixel_format !=
9822
		} else if (set->fb->pixel_format !=
9153
			   set->crtc->fb->pixel_format) {
9823
			   set->crtc->fb->pixel_format) {
9154
			config->mode_changed = true;
9824
			config->mode_changed = true;
9155
		} else {
9825
		} else {
9156
			config->fb_changed = true;
9826
			config->fb_changed = true;
9157
	}
9827
	}
9158
	}
9828
	}
9159
 
9829
 
9160
	if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
9830
	if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
9161
		config->fb_changed = true;
9831
		config->fb_changed = true;
9162
 
9832
 
9163
	if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
9833
	if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
9164
		DRM_DEBUG_KMS("modes are different, full mode set\n");
9834
		DRM_DEBUG_KMS("modes are different, full mode set\n");
9165
		drm_mode_debug_printmodeline(&set->crtc->mode);
9835
		drm_mode_debug_printmodeline(&set->crtc->mode);
9166
		drm_mode_debug_printmodeline(set->mode);
9836
		drm_mode_debug_printmodeline(set->mode);
9167
		config->mode_changed = true;
9837
		config->mode_changed = true;
9168
	}
9838
	}
9169
 
9839
 
9170
	DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
9840
	DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
9171
			set->crtc->base.id, config->mode_changed, config->fb_changed);
9841
			set->crtc->base.id, config->mode_changed, config->fb_changed);
9172
}
9842
}
9173
 
9843
 
9174
static int
9844
static int
9175
intel_modeset_stage_output_state(struct drm_device *dev,
9845
intel_modeset_stage_output_state(struct drm_device *dev,
9176
				 struct drm_mode_set *set,
9846
				 struct drm_mode_set *set,
9177
				 struct intel_set_config *config)
9847
				 struct intel_set_config *config)
9178
{
9848
{
9179
	struct drm_crtc *new_crtc;
9849
	struct drm_crtc *new_crtc;
9180
	struct intel_connector *connector;
9850
	struct intel_connector *connector;
9181
	struct intel_encoder *encoder;
9851
	struct intel_encoder *encoder;
9182
	int ro;
9852
	int ro;
9183
 
9853
 
9184
	/* The upper layers ensure that we either disable a crtc or have a list
9854
	/* The upper layers ensure that we either disable a crtc or have a list
9185
	 * of connectors. For paranoia, double-check this. */
9855
	 * of connectors. For paranoia, double-check this. */
9186
	WARN_ON(!set->fb && (set->num_connectors != 0));
9856
	WARN_ON(!set->fb && (set->num_connectors != 0));
9187
	WARN_ON(set->fb && (set->num_connectors == 0));
9857
	WARN_ON(set->fb && (set->num_connectors == 0));
9188
 
9858
 
9189
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9859
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9190
			    base.head) {
9860
			    base.head) {
9191
		/* Otherwise traverse passed in connector list and get encoders
9861
		/* Otherwise traverse passed in connector list and get encoders
9192
		 * for them. */
9862
		 * for them. */
9193
		for (ro = 0; ro < set->num_connectors; ro++) {
9863
		for (ro = 0; ro < set->num_connectors; ro++) {
9194
			if (set->connectors[ro] == &connector->base) {
9864
			if (set->connectors[ro] == &connector->base) {
9195
				connector->new_encoder = connector->encoder;
9865
				connector->new_encoder = connector->encoder;
9196
				break;
9866
				break;
9197
			}
9867
			}
9198
		}
9868
		}
9199
 
9869
 
9200
		/* If we disable the crtc, disable all its connectors. Also, if
9870
		/* If we disable the crtc, disable all its connectors. Also, if
9201
		 * the connector is on the changing crtc but not on the new
9871
		 * the connector is on the changing crtc but not on the new
9202
		 * connector list, disable it. */
9872
		 * connector list, disable it. */
9203
		if ((!set->fb || ro == set->num_connectors) &&
9873
		if ((!set->fb || ro == set->num_connectors) &&
9204
		    connector->base.encoder &&
9874
		    connector->base.encoder &&
9205
		    connector->base.encoder->crtc == set->crtc) {
9875
		    connector->base.encoder->crtc == set->crtc) {
9206
			connector->new_encoder = NULL;
9876
			connector->new_encoder = NULL;
9207
 
9877
 
9208
			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
9878
			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
9209
				connector->base.base.id,
9879
				connector->base.base.id,
9210
				drm_get_connector_name(&connector->base));
9880
				drm_get_connector_name(&connector->base));
9211
		}
9881
		}
9212
 
9882
 
9213
 
9883
 
9214
		if (&connector->new_encoder->base != connector->base.encoder) {
9884
		if (&connector->new_encoder->base != connector->base.encoder) {
9215
			DRM_DEBUG_KMS("encoder changed, full mode switch\n");
9885
			DRM_DEBUG_KMS("encoder changed, full mode switch\n");
9216
			config->mode_changed = true;
9886
			config->mode_changed = true;
9217
		}
9887
		}
9218
	}
9888
	}
9219
	/* connector->new_encoder is now updated for all connectors. */
9889
	/* connector->new_encoder is now updated for all connectors. */
9220
 
9890
 
9221
	/* Update crtc of enabled connectors. */
9891
	/* Update crtc of enabled connectors. */
9222
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9892
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9223
			    base.head) {
9893
			    base.head) {
9224
		if (!connector->new_encoder)
9894
		if (!connector->new_encoder)
9225
			continue;
9895
			continue;
9226
 
9896
 
9227
		new_crtc = connector->new_encoder->base.crtc;
9897
		new_crtc = connector->new_encoder->base.crtc;
9228
 
9898
 
9229
		for (ro = 0; ro < set->num_connectors; ro++) {
9899
		for (ro = 0; ro < set->num_connectors; ro++) {
9230
			if (set->connectors[ro] == &connector->base)
9900
			if (set->connectors[ro] == &connector->base)
9231
				new_crtc = set->crtc;
9901
				new_crtc = set->crtc;
9232
		}
9902
		}
9233
 
9903
 
9234
		/* Make sure the new CRTC will work with the encoder */
9904
		/* Make sure the new CRTC will work with the encoder */
9235
		if (!intel_encoder_crtc_ok(&connector->new_encoder->base,
9905
		if (!drm_encoder_crtc_ok(&connector->new_encoder->base,
9236
					   new_crtc)) {
9906
					   new_crtc)) {
9237
			return -EINVAL;
9907
			return -EINVAL;
9238
		}
9908
		}
9239
		connector->encoder->new_crtc = to_intel_crtc(new_crtc);
9909
		connector->encoder->new_crtc = to_intel_crtc(new_crtc);
9240
 
9910
 
9241
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
9911
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
9242
			connector->base.base.id,
9912
			connector->base.base.id,
9243
			drm_get_connector_name(&connector->base),
9913
			drm_get_connector_name(&connector->base),
9244
			new_crtc->base.id);
9914
			new_crtc->base.id);
9245
	}
9915
	}
9246
 
9916
 
9247
	/* Check for any encoders that needs to be disabled. */
9917
	/* Check for any encoders that needs to be disabled. */
9248
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9918
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9249
			    base.head) {
9919
			    base.head) {
-
 
9920
		int num_connectors = 0;
9250
		list_for_each_entry(connector,
9921
		list_for_each_entry(connector,
9251
				    &dev->mode_config.connector_list,
9922
				    &dev->mode_config.connector_list,
9252
				    base.head) {
9923
				    base.head) {
9253
			if (connector->new_encoder == encoder) {
9924
			if (connector->new_encoder == encoder) {
9254
				WARN_ON(!connector->new_encoder->new_crtc);
9925
				WARN_ON(!connector->new_encoder->new_crtc);
9255
 
-
 
9256
				goto next_encoder;
9926
				num_connectors++;
9257
			}
9927
			}
9258
		}
9928
		}
-
 
9929
 
-
 
9930
		if (num_connectors == 0)
9259
		encoder->new_crtc = NULL;
9931
		encoder->new_crtc = NULL;
-
 
9932
		else if (num_connectors > 1)
9260
next_encoder:
9933
			return -EINVAL;
-
 
9934
 
9261
		/* Only now check for crtc changes so we don't miss encoders
9935
		/* Only now check for crtc changes so we don't miss encoders
9262
		 * that will be disabled. */
9936
		 * that will be disabled. */
9263
		if (&encoder->new_crtc->base != encoder->base.crtc) {
9937
		if (&encoder->new_crtc->base != encoder->base.crtc) {
9264
			DRM_DEBUG_KMS("crtc changed, full mode switch\n");
9938
			DRM_DEBUG_KMS("crtc changed, full mode switch\n");
9265
			config->mode_changed = true;
9939
			config->mode_changed = true;
9266
		}
9940
		}
9267
	}
9941
	}
9268
	/* Now we've also updated encoder->new_crtc for all encoders. */
9942
	/* Now we've also updated encoder->new_crtc for all encoders. */
9269
 
9943
 
9270
	return 0;
9944
	return 0;
9271
}
9945
}
9272
 
9946
 
9273
static int intel_crtc_set_config(struct drm_mode_set *set)
9947
static int intel_crtc_set_config(struct drm_mode_set *set)
9274
{
9948
{
9275
	struct drm_device *dev;
9949
	struct drm_device *dev;
9276
	struct drm_mode_set save_set;
9950
	struct drm_mode_set save_set;
9277
	struct intel_set_config *config;
9951
	struct intel_set_config *config;
9278
	int ret;
9952
	int ret;
9279
 
9953
 
9280
	BUG_ON(!set);
9954
	BUG_ON(!set);
9281
	BUG_ON(!set->crtc);
9955
	BUG_ON(!set->crtc);
9282
	BUG_ON(!set->crtc->helper_private);
9956
	BUG_ON(!set->crtc->helper_private);
9283
 
9957
 
9284
	/* Enforce sane interface api - has been abused by the fb helper. */
9958
	/* Enforce sane interface api - has been abused by the fb helper. */
9285
	BUG_ON(!set->mode && set->fb);
9959
	BUG_ON(!set->mode && set->fb);
9286
	BUG_ON(set->fb && set->num_connectors == 0);
9960
	BUG_ON(set->fb && set->num_connectors == 0);
9287
 
9961
 
9288
	if (set->fb) {
9962
	if (set->fb) {
9289
		DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
9963
		DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
9290
				set->crtc->base.id, set->fb->base.id,
9964
				set->crtc->base.id, set->fb->base.id,
9291
				(int)set->num_connectors, set->x, set->y);
9965
				(int)set->num_connectors, set->x, set->y);
9292
	} else {
9966
	} else {
9293
		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
9967
		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
9294
	}
9968
	}
9295
 
9969
 
9296
	dev = set->crtc->dev;
9970
	dev = set->crtc->dev;
9297
 
9971
 
9298
	ret = -ENOMEM;
9972
	ret = -ENOMEM;
9299
	config = kzalloc(sizeof(*config), GFP_KERNEL);
9973
	config = kzalloc(sizeof(*config), GFP_KERNEL);
9300
	if (!config)
9974
	if (!config)
9301
		goto out_config;
9975
		goto out_config;
9302
 
9976
 
9303
	ret = intel_set_config_save_state(dev, config);
9977
	ret = intel_set_config_save_state(dev, config);
9304
	if (ret)
9978
	if (ret)
9305
		goto out_config;
9979
		goto out_config;
9306
 
9980
 
9307
	save_set.crtc = set->crtc;
9981
	save_set.crtc = set->crtc;
9308
	save_set.mode = &set->crtc->mode;
9982
	save_set.mode = &set->crtc->mode;
9309
	save_set.x = set->crtc->x;
9983
	save_set.x = set->crtc->x;
9310
	save_set.y = set->crtc->y;
9984
	save_set.y = set->crtc->y;
9311
	save_set.fb = set->crtc->fb;
9985
	save_set.fb = set->crtc->fb;
9312
 
9986
 
9313
	/* Compute whether we need a full modeset, only an fb base update or no
9987
	/* Compute whether we need a full modeset, only an fb base update or no
9314
	 * change at all. In the future we might also check whether only the
9988
	 * change at all. In the future we might also check whether only the
9315
	 * mode changed, e.g. for LVDS where we only change the panel fitter in
9989
	 * mode changed, e.g. for LVDS where we only change the panel fitter in
9316
	 * such cases. */
9990
	 * such cases. */
9317
	intel_set_config_compute_mode_changes(set, config);
9991
	intel_set_config_compute_mode_changes(set, config);
9318
 
9992
 
9319
	ret = intel_modeset_stage_output_state(dev, set, config);
9993
	ret = intel_modeset_stage_output_state(dev, set, config);
9320
	if (ret)
9994
	if (ret)
9321
		goto fail;
9995
		goto fail;
9322
 
9996
 
9323
	if (config->mode_changed) {
9997
	if (config->mode_changed) {
9324
		ret = intel_set_mode(set->crtc, set->mode,
9998
		ret = intel_set_mode(set->crtc, set->mode,
9325
				     set->x, set->y, set->fb);
9999
				     set->x, set->y, set->fb);
9326
	} else if (config->fb_changed) {
10000
	} else if (config->fb_changed) {
9327
//       intel_crtc_wait_for_pending_flips(set->crtc);
10001
//       intel_crtc_wait_for_pending_flips(set->crtc);
9328
 
10002
 
9329
		ret = intel_pipe_set_base(set->crtc,
10003
		ret = intel_pipe_set_base(set->crtc,
9330
					  set->x, set->y, set->fb);
10004
					  set->x, set->y, set->fb);
-
 
10005
		/*
-
 
10006
		 * In the fastboot case this may be our only check of the
-
 
10007
		 * state after boot.  It would be better to only do it on
-
 
10008
		 * the first update, but we don't have a nice way of doing that
-
 
10009
		 * (and really, set_config isn't used much for high freq page
-
 
10010
		 * flipping, so increasing its cost here shouldn't be a big
-
 
10011
		 * deal).
-
 
10012
		 */
-
 
10013
		if (i915_fastboot && ret == 0)
-
 
10014
			intel_modeset_check_state(set->crtc->dev);
9331
	}
10015
	}
9332
 
10016
 
9333
	if (ret) {
10017
	if (ret) {
9334
		DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
10018
		DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
9335
			  set->crtc->base.id, ret);
10019
			  set->crtc->base.id, ret);
9336
fail:
10020
fail:
9337
	intel_set_config_restore_state(dev, config);
10021
	intel_set_config_restore_state(dev, config);
9338
 
10022
 
9339
	/* Try to restore the config */
10023
	/* Try to restore the config */
9340
	if (config->mode_changed &&
10024
	if (config->mode_changed &&
9341
	    intel_set_mode(save_set.crtc, save_set.mode,
10025
	    intel_set_mode(save_set.crtc, save_set.mode,
9342
			    save_set.x, save_set.y, save_set.fb))
10026
			    save_set.x, save_set.y, save_set.fb))
9343
		DRM_ERROR("failed to restore config after modeset failure\n");
10027
		DRM_ERROR("failed to restore config after modeset failure\n");
9344
	}
10028
	}
9345
 
10029
 
9346
out_config:
10030
out_config:
9347
	intel_set_config_free(config);
10031
	intel_set_config_free(config);
9348
	return ret;
10032
	return ret;
9349
}
10033
}
9350
 
10034
 
9351
static const struct drm_crtc_funcs intel_crtc_funcs = {
10035
static const struct drm_crtc_funcs intel_crtc_funcs = {
9352
//	.cursor_set = intel_crtc_cursor_set,
10036
//	.cursor_set = intel_crtc_cursor_set,
9353
	.cursor_move = intel_crtc_cursor_move,
10037
	.cursor_move = intel_crtc_cursor_move,
9354
	.gamma_set = intel_crtc_gamma_set,
10038
	.gamma_set = intel_crtc_gamma_set,
9355
	.set_config = intel_crtc_set_config,
10039
	.set_config = intel_crtc_set_config,
9356
	.destroy = intel_crtc_destroy,
10040
	.destroy = intel_crtc_destroy,
9357
//	.page_flip = intel_crtc_page_flip,
10041
//	.page_flip = intel_crtc_page_flip,
9358
};
10042
};
9359
 
10043
 
9360
static void intel_cpu_pll_init(struct drm_device *dev)
10044
static void intel_cpu_pll_init(struct drm_device *dev)
9361
{
10045
{
9362
	if (HAS_DDI(dev))
10046
	if (HAS_DDI(dev))
9363
		intel_ddi_pll_init(dev);
10047
		intel_ddi_pll_init(dev);
9364
}
10048
}
9365
 
10049
 
9366
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
10050
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
9367
				      struct intel_shared_dpll *pll,
10051
				      struct intel_shared_dpll *pll,
9368
				      struct intel_dpll_hw_state *hw_state)
10052
				      struct intel_dpll_hw_state *hw_state)
9369
{
10053
{
9370
	uint32_t val;
10054
	uint32_t val;
9371
 
10055
 
9372
	val = I915_READ(PCH_DPLL(pll->id));
10056
	val = I915_READ(PCH_DPLL(pll->id));
9373
	hw_state->dpll = val;
10057
	hw_state->dpll = val;
9374
	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
10058
	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
9375
	hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
10059
	hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
9376
 
10060
 
9377
	return val & DPLL_VCO_ENABLE;
10061
	return val & DPLL_VCO_ENABLE;
9378
}
10062
}
9379
 
10063
 
9380
static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
10064
static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
9381
				  struct intel_shared_dpll *pll)
10065
				  struct intel_shared_dpll *pll)
9382
{
10066
{
9383
	I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0);
10067
	I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0);
9384
	I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1);
10068
	I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1);
9385
}
10069
}
9386
 
10070
 
9387
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
10071
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
9388
				struct intel_shared_dpll *pll)
10072
				struct intel_shared_dpll *pll)
9389
{
10073
{
9390
	/* PCH refclock must be enabled first */
10074
	/* PCH refclock must be enabled first */
9391
	assert_pch_refclk_enabled(dev_priv);
10075
	ibx_assert_pch_refclk_enabled(dev_priv);
9392
 
10076
 
9393
	I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
10077
	I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
9394
 
10078
 
9395
	/* Wait for the clocks to stabilize. */
10079
	/* Wait for the clocks to stabilize. */
9396
	POSTING_READ(PCH_DPLL(pll->id));
10080
	POSTING_READ(PCH_DPLL(pll->id));
9397
	udelay(150);
10081
	udelay(150);
9398
 
10082
 
9399
	/* The pixel multiplier can only be updated once the
10083
	/* The pixel multiplier can only be updated once the
9400
	 * DPLL is enabled and the clocks are stable.
10084
	 * DPLL is enabled and the clocks are stable.
9401
	 *
10085
	 *
9402
	 * So write it again.
10086
	 * So write it again.
9403
	 */
10087
	 */
9404
	I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
10088
	I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
9405
	POSTING_READ(PCH_DPLL(pll->id));
10089
	POSTING_READ(PCH_DPLL(pll->id));
9406
	udelay(200);
10090
	udelay(200);
9407
}
10091
}
9408
 
10092
 
9409
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
10093
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
9410
				 struct intel_shared_dpll *pll)
10094
				 struct intel_shared_dpll *pll)
9411
{
10095
{
9412
	struct drm_device *dev = dev_priv->dev;
10096
	struct drm_device *dev = dev_priv->dev;
9413
	struct intel_crtc *crtc;
10097
	struct intel_crtc *crtc;
9414
 
10098
 
9415
	/* Make sure no transcoder isn't still depending on us. */
10099
	/* Make sure no transcoder isn't still depending on us. */
9416
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
10100
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
9417
		if (intel_crtc_to_shared_dpll(crtc) == pll)
10101
		if (intel_crtc_to_shared_dpll(crtc) == pll)
9418
			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
10102
			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
9419
	}
10103
	}
9420
 
10104
 
9421
	I915_WRITE(PCH_DPLL(pll->id), 0);
10105
	I915_WRITE(PCH_DPLL(pll->id), 0);
9422
	POSTING_READ(PCH_DPLL(pll->id));
10106
	POSTING_READ(PCH_DPLL(pll->id));
9423
	udelay(200);
10107
	udelay(200);
9424
}
10108
}
9425
 
10109
 
9426
static char *ibx_pch_dpll_names[] = {
10110
static char *ibx_pch_dpll_names[] = {
9427
	"PCH DPLL A",
10111
	"PCH DPLL A",
9428
	"PCH DPLL B",
10112
	"PCH DPLL B",
9429
};
10113
};
9430
 
10114
 
9431
static void ibx_pch_dpll_init(struct drm_device *dev)
10115
static void ibx_pch_dpll_init(struct drm_device *dev)
9432
{
10116
{
9433
	struct drm_i915_private *dev_priv = dev->dev_private;
10117
	struct drm_i915_private *dev_priv = dev->dev_private;
9434
	int i;
10118
	int i;
9435
 
10119
 
9436
	dev_priv->num_shared_dpll = 2;
10120
	dev_priv->num_shared_dpll = 2;
9437
 
10121
 
9438
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
10122
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
9439
		dev_priv->shared_dplls[i].id = i;
10123
		dev_priv->shared_dplls[i].id = i;
9440
		dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
10124
		dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
9441
		dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
10125
		dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
9442
		dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
10126
		dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
9443
		dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
10127
		dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
9444
		dev_priv->shared_dplls[i].get_hw_state =
10128
		dev_priv->shared_dplls[i].get_hw_state =
9445
			ibx_pch_dpll_get_hw_state;
10129
			ibx_pch_dpll_get_hw_state;
9446
	}
10130
	}
9447
}
10131
}
9448
 
10132
 
9449
static void intel_shared_dpll_init(struct drm_device *dev)
10133
static void intel_shared_dpll_init(struct drm_device *dev)
9450
{
10134
{
9451
	struct drm_i915_private *dev_priv = dev->dev_private;
10135
	struct drm_i915_private *dev_priv = dev->dev_private;
9452
 
10136
 
9453
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
10137
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
9454
		ibx_pch_dpll_init(dev);
10138
		ibx_pch_dpll_init(dev);
9455
	else
10139
	else
9456
		dev_priv->num_shared_dpll = 0;
10140
		dev_priv->num_shared_dpll = 0;
9457
 
10141
 
9458
	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
10142
	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
9459
	DRM_DEBUG_KMS("%i shared PLLs initialized\n",
-
 
9460
		      dev_priv->num_shared_dpll);
-
 
9461
}
10143
}
9462
 
10144
 
9463
static void intel_crtc_init(struct drm_device *dev, int pipe)
10145
static void intel_crtc_init(struct drm_device *dev, int pipe)
9464
{
10146
{
9465
	drm_i915_private_t *dev_priv = dev->dev_private;
10147
	drm_i915_private_t *dev_priv = dev->dev_private;
9466
	struct intel_crtc *intel_crtc;
10148
	struct intel_crtc *intel_crtc;
9467
	int i;
10149
	int i;
9468
 
10150
 
9469
	intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
10151
	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
9470
	if (intel_crtc == NULL)
10152
	if (intel_crtc == NULL)
9471
		return;
10153
		return;
9472
 
10154
 
9473
	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
10155
	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
9474
 
10156
 
9475
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
10157
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
9476
	for (i = 0; i < 256; i++) {
10158
	for (i = 0; i < 256; i++) {
9477
		intel_crtc->lut_r[i] = i;
10159
		intel_crtc->lut_r[i] = i;
9478
		intel_crtc->lut_g[i] = i;
10160
		intel_crtc->lut_g[i] = i;
9479
		intel_crtc->lut_b[i] = i;
10161
		intel_crtc->lut_b[i] = i;
9480
	}
10162
	}
-
 
10163
 
-
 
10164
	/*
9481
 
10165
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
-
 
10166
	 * is hooked to plane B. Hence we want plane A feeding pipe B.
9482
	/* Swap pipes & planes for FBC on pre-965 */
10167
	 */
9483
	intel_crtc->pipe = pipe;
10168
	intel_crtc->pipe = pipe;
9484
	intel_crtc->plane = pipe;
10169
	intel_crtc->plane = pipe;
9485
	if (IS_MOBILE(dev) && IS_GEN3(dev)) {
10170
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
9486
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
10171
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
9487
		intel_crtc->plane = !pipe;
10172
		intel_crtc->plane = !pipe;
9488
	}
10173
	}
9489
 
10174
 
9490
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
10175
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
9491
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
10176
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
9492
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
10177
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
9493
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
10178
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
9494
 
10179
 
9495
	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
10180
	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
9496
}
10181
}
-
 
10182
 
-
 
10183
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
-
 
10184
{
-
 
10185
	struct drm_encoder *encoder = connector->base.encoder;
-
 
10186
 
-
 
10187
	WARN_ON(!mutex_is_locked(&connector->base.dev->mode_config.mutex));
-
 
10188
 
-
 
10189
	if (!encoder)
-
 
10190
		return INVALID_PIPE;
-
 
10191
 
-
 
10192
	return to_intel_crtc(encoder->crtc)->pipe;
-
 
10193
}
9497
 
10194
 
9498
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
10195
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
9499
				struct drm_file *file)
10196
				struct drm_file *file)
9500
{
10197
{
9501
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
10198
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
9502
	struct drm_mode_object *drmmode_obj;
10199
	struct drm_mode_object *drmmode_obj;
9503
	struct intel_crtc *crtc;
10200
	struct intel_crtc *crtc;
9504
 
10201
 
9505
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
10202
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
9506
		return -ENODEV;
10203
		return -ENODEV;
9507
 
10204
 
9508
	drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
10205
	drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
9509
			DRM_MODE_OBJECT_CRTC);
10206
			DRM_MODE_OBJECT_CRTC);
9510
 
10207
 
9511
	if (!drmmode_obj) {
10208
	if (!drmmode_obj) {
9512
		DRM_ERROR("no such CRTC id\n");
10209
		DRM_ERROR("no such CRTC id\n");
9513
		return -EINVAL;
10210
		return -ENOENT;
9514
	}
10211
	}
9515
 
10212
 
9516
	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
10213
	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
9517
	pipe_from_crtc_id->pipe = crtc->pipe;
10214
	pipe_from_crtc_id->pipe = crtc->pipe;
9518
 
10215
 
9519
	return 0;
10216
	return 0;
9520
}
10217
}
9521
 
10218
 
9522
static int intel_encoder_clones(struct intel_encoder *encoder)
10219
static int intel_encoder_clones(struct intel_encoder *encoder)
9523
{
10220
{
9524
	struct drm_device *dev = encoder->base.dev;
10221
	struct drm_device *dev = encoder->base.dev;
9525
	struct intel_encoder *source_encoder;
10222
	struct intel_encoder *source_encoder;
9526
	int index_mask = 0;
10223
	int index_mask = 0;
9527
	int entry = 0;
10224
	int entry = 0;
9528
 
10225
 
9529
	list_for_each_entry(source_encoder,
10226
	list_for_each_entry(source_encoder,
9530
			    &dev->mode_config.encoder_list, base.head) {
10227
			    &dev->mode_config.encoder_list, base.head) {
9531
 
10228
 
9532
		if (encoder == source_encoder)
10229
		if (encoder == source_encoder)
9533
			index_mask |= (1 << entry);
10230
			index_mask |= (1 << entry);
9534
 
10231
 
9535
		/* Intel hw has only one MUX where enocoders could be cloned. */
10232
		/* Intel hw has only one MUX where enocoders could be cloned. */
9536
		if (encoder->cloneable && source_encoder->cloneable)
10233
		if (encoder->cloneable && source_encoder->cloneable)
9537
			index_mask |= (1 << entry);
10234
			index_mask |= (1 << entry);
9538
 
10235
 
9539
		entry++;
10236
		entry++;
9540
	}
10237
	}
9541
 
10238
 
9542
	return index_mask;
10239
	return index_mask;
9543
}
10240
}
9544
 
10241
 
9545
static bool has_edp_a(struct drm_device *dev)
10242
static bool has_edp_a(struct drm_device *dev)
9546
{
10243
{
9547
	struct drm_i915_private *dev_priv = dev->dev_private;
10244
	struct drm_i915_private *dev_priv = dev->dev_private;
9548
 
10245
 
9549
	if (!IS_MOBILE(dev))
10246
	if (!IS_MOBILE(dev))
9550
		return false;
10247
		return false;
9551
 
10248
 
9552
	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
10249
	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
9553
		return false;
10250
		return false;
9554
 
10251
 
9555
	if (IS_GEN5(dev) &&
10252
	if (IS_GEN5(dev) &&
9556
	    (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
10253
	    (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
9557
		return false;
10254
		return false;
9558
 
10255
 
9559
	return true;
10256
	return true;
9560
}
10257
}
-
 
10258
 
-
 
10259
const char *intel_output_name(int output)
-
 
10260
{
-
 
10261
	static const char *names[] = {
-
 
10262
		[INTEL_OUTPUT_UNUSED] = "Unused",
-
 
10263
		[INTEL_OUTPUT_ANALOG] = "Analog",
-
 
10264
		[INTEL_OUTPUT_DVO] = "DVO",
-
 
10265
		[INTEL_OUTPUT_SDVO] = "SDVO",
-
 
10266
		[INTEL_OUTPUT_LVDS] = "LVDS",
-
 
10267
		[INTEL_OUTPUT_TVOUT] = "TV",
-
 
10268
		[INTEL_OUTPUT_HDMI] = "HDMI",
-
 
10269
		[INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort",
-
 
10270
		[INTEL_OUTPUT_EDP] = "eDP",
-
 
10271
		[INTEL_OUTPUT_DSI] = "DSI",
-
 
10272
		[INTEL_OUTPUT_UNKNOWN] = "Unknown",
-
 
10273
	};
-
 
10274
 
-
 
10275
	if (output < 0 || output >= ARRAY_SIZE(names) || !names[output])
-
 
10276
		return "Invalid";
-
 
10277
 
-
 
10278
	return names[output];
-
 
10279
}
9561
 
10280
 
9562
static void intel_setup_outputs(struct drm_device *dev)
10281
static void intel_setup_outputs(struct drm_device *dev)
9563
{
10282
{
9564
	struct drm_i915_private *dev_priv = dev->dev_private;
10283
	struct drm_i915_private *dev_priv = dev->dev_private;
9565
	struct intel_encoder *encoder;
10284
	struct intel_encoder *encoder;
9566
	bool dpd_is_edp = false;
10285
	bool dpd_is_edp = false;
9567
 
10286
 
9568
	intel_lvds_init(dev);
10287
	intel_lvds_init(dev);
9569
 
10288
 
9570
	if (!IS_ULT(dev))
10289
	if (!IS_ULT(dev))
9571
	intel_crt_init(dev);
10290
	intel_crt_init(dev);
9572
 
10291
 
9573
	if (HAS_DDI(dev)) {
10292
	if (HAS_DDI(dev)) {
9574
		int found;
10293
		int found;
9575
 
10294
 
9576
		/* Haswell uses DDI functions to detect digital outputs */
10295
		/* Haswell uses DDI functions to detect digital outputs */
9577
		found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
10296
		found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
9578
		/* DDI A only supports eDP */
10297
		/* DDI A only supports eDP */
9579
		if (found)
10298
		if (found)
9580
			intel_ddi_init(dev, PORT_A);
10299
			intel_ddi_init(dev, PORT_A);
9581
 
10300
 
9582
		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
10301
		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
9583
		 * register */
10302
		 * register */
9584
		found = I915_READ(SFUSE_STRAP);
10303
		found = I915_READ(SFUSE_STRAP);
9585
 
10304
 
9586
		if (found & SFUSE_STRAP_DDIB_DETECTED)
10305
		if (found & SFUSE_STRAP_DDIB_DETECTED)
9587
			intel_ddi_init(dev, PORT_B);
10306
			intel_ddi_init(dev, PORT_B);
9588
		if (found & SFUSE_STRAP_DDIC_DETECTED)
10307
		if (found & SFUSE_STRAP_DDIC_DETECTED)
9589
			intel_ddi_init(dev, PORT_C);
10308
			intel_ddi_init(dev, PORT_C);
9590
		if (found & SFUSE_STRAP_DDID_DETECTED)
10309
		if (found & SFUSE_STRAP_DDID_DETECTED)
9591
			intel_ddi_init(dev, PORT_D);
10310
			intel_ddi_init(dev, PORT_D);
9592
	} else if (HAS_PCH_SPLIT(dev)) {
10311
	} else if (HAS_PCH_SPLIT(dev)) {
9593
		int found;
10312
		int found;
9594
		dpd_is_edp = intel_dpd_is_edp(dev);
10313
		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
9595
 
10314
 
9596
		if (has_edp_a(dev))
10315
		if (has_edp_a(dev))
9597
			intel_dp_init(dev, DP_A, PORT_A);
10316
			intel_dp_init(dev, DP_A, PORT_A);
9598
 
10317
 
9599
		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
10318
		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
9600
			/* PCH SDVOB multiplex with HDMIB */
10319
			/* PCH SDVOB multiplex with HDMIB */
9601
			found = intel_sdvo_init(dev, PCH_SDVOB, true);
10320
			found = intel_sdvo_init(dev, PCH_SDVOB, true);
9602
			if (!found)
10321
			if (!found)
9603
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
10322
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
9604
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
10323
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
9605
				intel_dp_init(dev, PCH_DP_B, PORT_B);
10324
				intel_dp_init(dev, PCH_DP_B, PORT_B);
9606
		}
10325
		}
9607
 
10326
 
9608
		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
10327
		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
9609
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
10328
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
9610
 
10329
 
9611
		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
10330
		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
9612
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);
10331
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);
9613
 
10332
 
9614
		if (I915_READ(PCH_DP_C) & DP_DETECTED)
10333
		if (I915_READ(PCH_DP_C) & DP_DETECTED)
9615
			intel_dp_init(dev, PCH_DP_C, PORT_C);
10334
			intel_dp_init(dev, PCH_DP_C, PORT_C);
9616
 
10335
 
9617
		if (I915_READ(PCH_DP_D) & DP_DETECTED)
10336
		if (I915_READ(PCH_DP_D) & DP_DETECTED)
9618
			intel_dp_init(dev, PCH_DP_D, PORT_D);
10337
			intel_dp_init(dev, PCH_DP_D, PORT_D);
9619
	} else if (IS_VALLEYVIEW(dev)) {
10338
	} else if (IS_VALLEYVIEW(dev)) {
9620
		/* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
-
 
9621
		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
-
 
9622
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
-
 
9623
					PORT_C);
-
 
9624
		if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
-
 
9625
				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C,
-
 
9626
					      PORT_C);
-
 
9627
		}
-
 
9628
 
-
 
9629
		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
10339
		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
9630
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
10340
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
9631
					PORT_B);
10341
					PORT_B);
9632
			if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
10342
			if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
9633
				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
10343
				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
9634
		}
10344
		}
-
 
10345
 
-
 
10346
		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
-
 
10347
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
-
 
10348
					PORT_C);
-
 
10349
		if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
-
 
10350
				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
-
 
10351
		}
-
 
10352
 
-
 
10353
		intel_dsi_init(dev);
9635
	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
10354
	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
9636
		bool found = false;
10355
		bool found = false;
9637
 
10356
 
9638
		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
10357
		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
9639
			DRM_DEBUG_KMS("probing SDVOB\n");
10358
			DRM_DEBUG_KMS("probing SDVOB\n");
9640
			found = intel_sdvo_init(dev, GEN3_SDVOB, true);
10359
			found = intel_sdvo_init(dev, GEN3_SDVOB, true);
9641
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
10360
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
9642
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
10361
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
9643
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
10362
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
9644
			}
10363
			}
9645
 
10364
 
9646
			if (!found && SUPPORTS_INTEGRATED_DP(dev))
10365
			if (!found && SUPPORTS_INTEGRATED_DP(dev))
9647
				intel_dp_init(dev, DP_B, PORT_B);
10366
				intel_dp_init(dev, DP_B, PORT_B);
9648
			}
10367
			}
9649
 
10368
 
9650
		/* Before G4X SDVOC doesn't have its own detect register */
10369
		/* Before G4X SDVOC doesn't have its own detect register */
9651
 
10370
 
9652
		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
10371
		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
9653
			DRM_DEBUG_KMS("probing SDVOC\n");
10372
			DRM_DEBUG_KMS("probing SDVOC\n");
9654
			found = intel_sdvo_init(dev, GEN3_SDVOC, false);
10373
			found = intel_sdvo_init(dev, GEN3_SDVOC, false);
9655
		}
10374
		}
9656
 
10375
 
9657
		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
10376
		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
9658
 
10377
 
9659
			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
10378
			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
9660
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
10379
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
9661
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
10380
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
9662
			}
10381
			}
9663
			if (SUPPORTS_INTEGRATED_DP(dev))
10382
			if (SUPPORTS_INTEGRATED_DP(dev))
9664
				intel_dp_init(dev, DP_C, PORT_C);
10383
				intel_dp_init(dev, DP_C, PORT_C);
9665
			}
10384
			}
9666
 
10385
 
9667
		if (SUPPORTS_INTEGRATED_DP(dev) &&
10386
		if (SUPPORTS_INTEGRATED_DP(dev) &&
9668
		    (I915_READ(DP_D) & DP_DETECTED))
10387
		    (I915_READ(DP_D) & DP_DETECTED))
9669
			intel_dp_init(dev, DP_D, PORT_D);
10388
			intel_dp_init(dev, DP_D, PORT_D);
9670
	} else if (IS_GEN2(dev))
10389
	} else if (IS_GEN2(dev))
9671
		intel_dvo_init(dev);
10390
		intel_dvo_init(dev);
9672
 
10391
 
9673
//   if (SUPPORTS_TV(dev))
10392
//   if (SUPPORTS_TV(dev))
9674
//       intel_tv_init(dev);
10393
//       intel_tv_init(dev);
9675
 
10394
 
9676
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
10395
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
9677
		encoder->base.possible_crtcs = encoder->crtc_mask;
10396
		encoder->base.possible_crtcs = encoder->crtc_mask;
9678
		encoder->base.possible_clones =
10397
		encoder->base.possible_clones =
9679
			intel_encoder_clones(encoder);
10398
			intel_encoder_clones(encoder);
9680
	}
10399
	}
9681
 
10400
 
9682
	intel_init_pch_refclk(dev);
10401
	intel_init_pch_refclk(dev);
9683
 
10402
 
9684
	drm_helper_move_panel_connectors_to_head(dev);
10403
	drm_helper_move_panel_connectors_to_head(dev);
9685
}
10404
}
9686
 
10405
 
9687
 
10406
 
9688
 
10407
 
9689
static const struct drm_framebuffer_funcs intel_fb_funcs = {
10408
static const struct drm_framebuffer_funcs intel_fb_funcs = {
9690
//	.destroy = intel_user_framebuffer_destroy,
10409
//	.destroy = intel_user_framebuffer_destroy,
9691
//	.create_handle = intel_user_framebuffer_create_handle,
10410
//	.create_handle = intel_user_framebuffer_create_handle,
9692
};
10411
};
9693
 
10412
 
9694
int intel_framebuffer_init(struct drm_device *dev,
10413
int intel_framebuffer_init(struct drm_device *dev,
9695
			   struct intel_framebuffer *intel_fb,
10414
			   struct intel_framebuffer *intel_fb,
9696
			   struct drm_mode_fb_cmd2 *mode_cmd,
10415
			   struct drm_mode_fb_cmd2 *mode_cmd,
9697
			   struct drm_i915_gem_object *obj)
10416
			   struct drm_i915_gem_object *obj)
9698
{
10417
{
-
 
10418
	int aligned_height, tile_height;
9699
	int pitch_limit;
10419
	int pitch_limit;
9700
	int ret;
10420
	int ret;
-
 
10421
 
-
 
10422
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
9701
 
10423
 
9702
	if (obj->tiling_mode == I915_TILING_Y) {
10424
	if (obj->tiling_mode == I915_TILING_Y) {
9703
		DRM_DEBUG("hardware does not support tiling Y\n");
10425
		DRM_DEBUG("hardware does not support tiling Y\n");
9704
		return -EINVAL;
10426
		return -EINVAL;
9705
	}
10427
	}
9706
 
10428
 
9707
	if (mode_cmd->pitches[0] & 63) {
10429
	if (mode_cmd->pitches[0] & 63) {
9708
		DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
10430
		DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
9709
			  mode_cmd->pitches[0]);
10431
			  mode_cmd->pitches[0]);
9710
		return -EINVAL;
10432
		return -EINVAL;
9711
	}
10433
	}
9712
 
10434
 
9713
	if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) {
10435
	if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) {
9714
		pitch_limit = 32*1024;
10436
		pitch_limit = 32*1024;
9715
	} else if (INTEL_INFO(dev)->gen >= 4) {
10437
	} else if (INTEL_INFO(dev)->gen >= 4) {
9716
		if (obj->tiling_mode)
10438
		if (obj->tiling_mode)
9717
			pitch_limit = 16*1024;
10439
			pitch_limit = 16*1024;
9718
		else
10440
		else
9719
			pitch_limit = 32*1024;
10441
			pitch_limit = 32*1024;
9720
	} else if (INTEL_INFO(dev)->gen >= 3) {
10442
	} else if (INTEL_INFO(dev)->gen >= 3) {
9721
		if (obj->tiling_mode)
10443
		if (obj->tiling_mode)
9722
			pitch_limit = 8*1024;
10444
			pitch_limit = 8*1024;
9723
		else
10445
		else
9724
			pitch_limit = 16*1024;
10446
			pitch_limit = 16*1024;
9725
	} else
10447
	} else
9726
		/* XXX DSPC is limited to 4k tiled */
10448
		/* XXX DSPC is limited to 4k tiled */
9727
		pitch_limit = 8*1024;
10449
		pitch_limit = 8*1024;
9728
 
10450
 
9729
	if (mode_cmd->pitches[0] > pitch_limit) {
10451
	if (mode_cmd->pitches[0] > pitch_limit) {
9730
		DRM_DEBUG("%s pitch (%d) must be at less than %d\n",
10452
		DRM_DEBUG("%s pitch (%d) must be at less than %d\n",
9731
			  obj->tiling_mode ? "tiled" : "linear",
10453
			  obj->tiling_mode ? "tiled" : "linear",
9732
			  mode_cmd->pitches[0], pitch_limit);
10454
			  mode_cmd->pitches[0], pitch_limit);
9733
		return -EINVAL;
10455
		return -EINVAL;
9734
	}
10456
	}
9735
 
10457
 
9736
	if (obj->tiling_mode != I915_TILING_NONE &&
10458
	if (obj->tiling_mode != I915_TILING_NONE &&
9737
	    mode_cmd->pitches[0] != obj->stride) {
10459
	    mode_cmd->pitches[0] != obj->stride) {
9738
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
10460
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
9739
			  mode_cmd->pitches[0], obj->stride);
10461
			  mode_cmd->pitches[0], obj->stride);
9740
			return -EINVAL;
10462
			return -EINVAL;
9741
	}
10463
	}
9742
 
10464
 
9743
	/* Reject formats not supported by any plane early. */
10465
	/* Reject formats not supported by any plane early. */
9744
	switch (mode_cmd->pixel_format) {
10466
	switch (mode_cmd->pixel_format) {
9745
	case DRM_FORMAT_C8:
10467
	case DRM_FORMAT_C8:
9746
	case DRM_FORMAT_RGB565:
10468
	case DRM_FORMAT_RGB565:
9747
	case DRM_FORMAT_XRGB8888:
10469
	case DRM_FORMAT_XRGB8888:
9748
	case DRM_FORMAT_ARGB8888:
10470
	case DRM_FORMAT_ARGB8888:
9749
		break;
10471
		break;
9750
	case DRM_FORMAT_XRGB1555:
10472
	case DRM_FORMAT_XRGB1555:
9751
	case DRM_FORMAT_ARGB1555:
10473
	case DRM_FORMAT_ARGB1555:
9752
		if (INTEL_INFO(dev)->gen > 3) {
10474
		if (INTEL_INFO(dev)->gen > 3) {
9753
			DRM_DEBUG("unsupported pixel format: %s\n",
10475
			DRM_DEBUG("unsupported pixel format: %s\n",
9754
				  drm_get_format_name(mode_cmd->pixel_format));
10476
				  drm_get_format_name(mode_cmd->pixel_format));
9755
			return -EINVAL;
10477
			return -EINVAL;
9756
		}
10478
		}
9757
		break;
10479
		break;
9758
	case DRM_FORMAT_XBGR8888:
10480
	case DRM_FORMAT_XBGR8888:
9759
	case DRM_FORMAT_ABGR8888:
10481
	case DRM_FORMAT_ABGR8888:
9760
	case DRM_FORMAT_XRGB2101010:
10482
	case DRM_FORMAT_XRGB2101010:
9761
	case DRM_FORMAT_ARGB2101010:
10483
	case DRM_FORMAT_ARGB2101010:
9762
	case DRM_FORMAT_XBGR2101010:
10484
	case DRM_FORMAT_XBGR2101010:
9763
	case DRM_FORMAT_ABGR2101010:
10485
	case DRM_FORMAT_ABGR2101010:
9764
		if (INTEL_INFO(dev)->gen < 4) {
10486
		if (INTEL_INFO(dev)->gen < 4) {
9765
			DRM_DEBUG("unsupported pixel format: %s\n",
10487
			DRM_DEBUG("unsupported pixel format: %s\n",
9766
				  drm_get_format_name(mode_cmd->pixel_format));
10488
				  drm_get_format_name(mode_cmd->pixel_format));
9767
			return -EINVAL;
10489
			return -EINVAL;
9768
		}
10490
		}
9769
		break;
10491
		break;
9770
	case DRM_FORMAT_YUYV:
10492
	case DRM_FORMAT_YUYV:
9771
	case DRM_FORMAT_UYVY:
10493
	case DRM_FORMAT_UYVY:
9772
	case DRM_FORMAT_YVYU:
10494
	case DRM_FORMAT_YVYU:
9773
	case DRM_FORMAT_VYUY:
10495
	case DRM_FORMAT_VYUY:
9774
		if (INTEL_INFO(dev)->gen < 5) {
10496
		if (INTEL_INFO(dev)->gen < 5) {
9775
			DRM_DEBUG("unsupported pixel format: %s\n",
10497
			DRM_DEBUG("unsupported pixel format: %s\n",
9776
				  drm_get_format_name(mode_cmd->pixel_format));
10498
				  drm_get_format_name(mode_cmd->pixel_format));
9777
			return -EINVAL;
10499
			return -EINVAL;
9778
		}
10500
		}
9779
		break;
10501
		break;
9780
	default:
10502
	default:
9781
		DRM_DEBUG("unsupported pixel format: %s\n",
10503
		DRM_DEBUG("unsupported pixel format: %s\n",
9782
			  drm_get_format_name(mode_cmd->pixel_format));
10504
			  drm_get_format_name(mode_cmd->pixel_format));
9783
		return -EINVAL;
10505
		return -EINVAL;
9784
	}
10506
	}
9785
 
10507
 
9786
	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
10508
	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
9787
	if (mode_cmd->offsets[0] != 0)
10509
	if (mode_cmd->offsets[0] != 0)
9788
		return -EINVAL;
10510
		return -EINVAL;
-
 
10511
 
-
 
10512
	tile_height = IS_GEN2(dev) ? 16 : 8;
-
 
10513
	aligned_height = ALIGN(mode_cmd->height,
-
 
10514
			       obj->tiling_mode ? tile_height : 1);
-
 
10515
	/* FIXME drm helper for size checks (especially planar formats)? */
-
 
10516
	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
-
 
10517
		return -EINVAL;
9789
 
10518
 
9790
	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
10519
	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
-
 
10520
	intel_fb->obj = obj;
9791
	intel_fb->obj = obj;
10521
	intel_fb->obj->framebuffer_references++;
9792
 
10522
 
9793
	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
10523
	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
9794
	if (ret) {
10524
	if (ret) {
9795
		DRM_ERROR("framebuffer init failed %d\n", ret);
10525
		DRM_ERROR("framebuffer init failed %d\n", ret);
9796
		return ret;
10526
		return ret;
9797
	}
10527
	}
9798
 
10528
 
9799
	return 0;
10529
	return 0;
9800
}
10530
}
-
 
10531
 
-
 
10532
#ifndef CONFIG_DRM_I915_FBDEV
-
 
10533
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
-
 
10534
{
-
 
10535
}
9801
 
10536
#endif
9802
 
10537
 
9803
static const struct drm_mode_config_funcs intel_mode_funcs = {
10538
static const struct drm_mode_config_funcs intel_mode_funcs = {
9804
	.fb_create = NULL /*intel_user_framebuffer_create*/,
10539
	.fb_create = NULL,
9805
	.output_poll_changed = intel_fb_output_poll_changed,
10540
	.output_poll_changed = intel_fbdev_output_poll_changed,
9806
};
10541
};
9807
 
10542
 
9808
/* Set up chip specific display functions */
10543
/* Set up chip specific display functions */
9809
static void intel_init_display(struct drm_device *dev)
10544
static void intel_init_display(struct drm_device *dev)
9810
{
10545
{
9811
	struct drm_i915_private *dev_priv = dev->dev_private;
10546
	struct drm_i915_private *dev_priv = dev->dev_private;
9812
 
10547
 
9813
	if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
10548
	if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
9814
		dev_priv->display.find_dpll = g4x_find_best_dpll;
10549
		dev_priv->display.find_dpll = g4x_find_best_dpll;
9815
	else if (IS_VALLEYVIEW(dev))
10550
	else if (IS_VALLEYVIEW(dev))
9816
		dev_priv->display.find_dpll = vlv_find_best_dpll;
10551
		dev_priv->display.find_dpll = vlv_find_best_dpll;
9817
	else if (IS_PINEVIEW(dev))
10552
	else if (IS_PINEVIEW(dev))
9818
		dev_priv->display.find_dpll = pnv_find_best_dpll;
10553
		dev_priv->display.find_dpll = pnv_find_best_dpll;
9819
	else
10554
	else
9820
		dev_priv->display.find_dpll = i9xx_find_best_dpll;
10555
		dev_priv->display.find_dpll = i9xx_find_best_dpll;
9821
 
10556
 
9822
	if (HAS_DDI(dev)) {
10557
	if (HAS_DDI(dev)) {
9823
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
10558
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
9824
		dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
10559
		dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
9825
		dev_priv->display.crtc_enable = haswell_crtc_enable;
10560
		dev_priv->display.crtc_enable = haswell_crtc_enable;
9826
		dev_priv->display.crtc_disable = haswell_crtc_disable;
10561
		dev_priv->display.crtc_disable = haswell_crtc_disable;
9827
		dev_priv->display.off = haswell_crtc_off;
10562
		dev_priv->display.off = haswell_crtc_off;
9828
		dev_priv->display.update_plane = ironlake_update_plane;
10563
		dev_priv->display.update_plane = ironlake_update_plane;
9829
	} else if (HAS_PCH_SPLIT(dev)) {
10564
	} else if (HAS_PCH_SPLIT(dev)) {
9830
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
10565
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
9831
		dev_priv->display.get_clock = ironlake_crtc_clock_get;
-
 
9832
		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
10566
		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
9833
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
10567
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
9834
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
10568
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
9835
		dev_priv->display.off = ironlake_crtc_off;
10569
		dev_priv->display.off = ironlake_crtc_off;
9836
		dev_priv->display.update_plane = ironlake_update_plane;
10570
		dev_priv->display.update_plane = ironlake_update_plane;
9837
	} else if (IS_VALLEYVIEW(dev)) {
10571
	} else if (IS_VALLEYVIEW(dev)) {
9838
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
10572
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
9839
		dev_priv->display.get_clock = vlv_crtc_clock_get;
-
 
9840
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
10573
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
9841
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
10574
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
9842
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
10575
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
9843
		dev_priv->display.off = i9xx_crtc_off;
10576
		dev_priv->display.off = i9xx_crtc_off;
9844
		dev_priv->display.update_plane = i9xx_update_plane;
10577
		dev_priv->display.update_plane = i9xx_update_plane;
9845
	} else {
10578
	} else {
9846
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
10579
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
9847
		dev_priv->display.get_clock = i9xx_crtc_clock_get;
-
 
9848
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
10580
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
9849
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
10581
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
9850
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
10582
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
9851
		dev_priv->display.off = i9xx_crtc_off;
10583
		dev_priv->display.off = i9xx_crtc_off;
9852
		dev_priv->display.update_plane = i9xx_update_plane;
10584
		dev_priv->display.update_plane = i9xx_update_plane;
9853
	}
10585
	}
9854
 
10586
 
9855
	/* Returns the core display clock speed */
10587
	/* Returns the core display clock speed */
9856
	if (IS_VALLEYVIEW(dev))
10588
	if (IS_VALLEYVIEW(dev))
9857
		dev_priv->display.get_display_clock_speed =
10589
		dev_priv->display.get_display_clock_speed =
9858
			valleyview_get_display_clock_speed;
10590
			valleyview_get_display_clock_speed;
9859
	else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
10591
	else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
9860
		dev_priv->display.get_display_clock_speed =
10592
		dev_priv->display.get_display_clock_speed =
9861
			i945_get_display_clock_speed;
10593
			i945_get_display_clock_speed;
9862
	else if (IS_I915G(dev))
10594
	else if (IS_I915G(dev))
9863
		dev_priv->display.get_display_clock_speed =
10595
		dev_priv->display.get_display_clock_speed =
9864
			i915_get_display_clock_speed;
10596
			i915_get_display_clock_speed;
9865
	else if (IS_I945GM(dev) || IS_845G(dev))
10597
	else if (IS_I945GM(dev) || IS_845G(dev))
9866
		dev_priv->display.get_display_clock_speed =
10598
		dev_priv->display.get_display_clock_speed =
9867
			i9xx_misc_get_display_clock_speed;
10599
			i9xx_misc_get_display_clock_speed;
9868
	else if (IS_PINEVIEW(dev))
10600
	else if (IS_PINEVIEW(dev))
9869
		dev_priv->display.get_display_clock_speed =
10601
		dev_priv->display.get_display_clock_speed =
9870
			pnv_get_display_clock_speed;
10602
			pnv_get_display_clock_speed;
9871
	else if (IS_I915GM(dev))
10603
	else if (IS_I915GM(dev))
9872
		dev_priv->display.get_display_clock_speed =
10604
		dev_priv->display.get_display_clock_speed =
9873
			i915gm_get_display_clock_speed;
10605
			i915gm_get_display_clock_speed;
9874
	else if (IS_I865G(dev))
10606
	else if (IS_I865G(dev))
9875
		dev_priv->display.get_display_clock_speed =
10607
		dev_priv->display.get_display_clock_speed =
9876
			i865_get_display_clock_speed;
10608
			i865_get_display_clock_speed;
9877
	else if (IS_I85X(dev))
10609
	else if (IS_I85X(dev))
9878
		dev_priv->display.get_display_clock_speed =
10610
		dev_priv->display.get_display_clock_speed =
9879
			i855_get_display_clock_speed;
10611
			i855_get_display_clock_speed;
9880
	else /* 852, 830 */
10612
	else /* 852, 830 */
9881
		dev_priv->display.get_display_clock_speed =
10613
		dev_priv->display.get_display_clock_speed =
9882
			i830_get_display_clock_speed;
10614
			i830_get_display_clock_speed;
9883
 
10615
 
9884
	if (HAS_PCH_SPLIT(dev)) {
10616
	if (HAS_PCH_SPLIT(dev)) {
9885
		if (IS_GEN5(dev)) {
10617
		if (IS_GEN5(dev)) {
9886
			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
10618
			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
9887
			dev_priv->display.write_eld = ironlake_write_eld;
10619
			dev_priv->display.write_eld = ironlake_write_eld;
9888
		} else if (IS_GEN6(dev)) {
10620
		} else if (IS_GEN6(dev)) {
9889
			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
10621
			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
9890
			dev_priv->display.write_eld = ironlake_write_eld;
10622
			dev_priv->display.write_eld = ironlake_write_eld;
9891
		} else if (IS_IVYBRIDGE(dev)) {
10623
		} else if (IS_IVYBRIDGE(dev)) {
9892
			/* FIXME: detect B0+ stepping and use auto training */
10624
			/* FIXME: detect B0+ stepping and use auto training */
9893
			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
10625
			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
9894
			dev_priv->display.write_eld = ironlake_write_eld;
10626
			dev_priv->display.write_eld = ironlake_write_eld;
9895
			dev_priv->display.modeset_global_resources =
10627
			dev_priv->display.modeset_global_resources =
9896
				ivb_modeset_global_resources;
10628
				ivb_modeset_global_resources;
9897
		} else if (IS_HASWELL(dev)) {
10629
		} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
9898
			dev_priv->display.fdi_link_train = hsw_fdi_link_train;
10630
			dev_priv->display.fdi_link_train = hsw_fdi_link_train;
9899
			dev_priv->display.write_eld = haswell_write_eld;
10631
			dev_priv->display.write_eld = haswell_write_eld;
9900
			dev_priv->display.modeset_global_resources =
10632
			dev_priv->display.modeset_global_resources =
9901
				haswell_modeset_global_resources;
10633
				haswell_modeset_global_resources;
9902
		}
10634
		}
9903
	} else if (IS_G4X(dev)) {
10635
	} else if (IS_G4X(dev)) {
9904
		dev_priv->display.write_eld = g4x_write_eld;
10636
		dev_priv->display.write_eld = g4x_write_eld;
-
 
10637
	} else if (IS_VALLEYVIEW(dev)) {
-
 
10638
		dev_priv->display.modeset_global_resources =
-
 
10639
			valleyview_modeset_global_resources;
-
 
10640
		dev_priv->display.write_eld = ironlake_write_eld;
9905
	}
10641
	}
9906
 
10642
 
9907
	/* Default just returns -ENODEV to indicate unsupported */
10643
	/* Default just returns -ENODEV to indicate unsupported */
9908
//	dev_priv->display.queue_flip = intel_default_queue_flip;
10644
//	dev_priv->display.queue_flip = intel_default_queue_flip;
9909
 
10645
 
9910
 
10646
 
9911
 
10647
 
-
 
10648
 
9912
 
10649
	intel_panel_init_backlight_funcs(dev);
9913
}
10650
}
9914
 
10651
 
9915
/*
10652
/*
9916
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
10653
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
9917
 * resume, or other times.  This quirk makes sure that's the case for
10654
 * resume, or other times.  This quirk makes sure that's the case for
9918
 * affected systems.
10655
 * affected systems.
9919
 */
10656
 */
9920
static void quirk_pipea_force(struct drm_device *dev)
10657
static void quirk_pipea_force(struct drm_device *dev)
9921
{
10658
{
9922
	struct drm_i915_private *dev_priv = dev->dev_private;
10659
	struct drm_i915_private *dev_priv = dev->dev_private;
9923
 
10660
 
9924
	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
10661
	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
9925
	DRM_INFO("applying pipe a force quirk\n");
10662
	DRM_INFO("applying pipe a force quirk\n");
9926
}
10663
}
9927
 
10664
 
9928
/*
10665
/*
9929
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
10666
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
9930
 */
10667
 */
9931
static void quirk_ssc_force_disable(struct drm_device *dev)
10668
static void quirk_ssc_force_disable(struct drm_device *dev)
9932
{
10669
{
9933
	struct drm_i915_private *dev_priv = dev->dev_private;
10670
	struct drm_i915_private *dev_priv = dev->dev_private;
9934
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
10671
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
9935
	DRM_INFO("applying lvds SSC disable quirk\n");
10672
	DRM_INFO("applying lvds SSC disable quirk\n");
9936
}
10673
}
9937
 
10674
 
9938
/*
10675
/*
9939
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
10676
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
9940
 * brightness value
10677
 * brightness value
9941
 */
10678
 */
9942
static void quirk_invert_brightness(struct drm_device *dev)
10679
static void quirk_invert_brightness(struct drm_device *dev)
9943
{
10680
{
9944
	struct drm_i915_private *dev_priv = dev->dev_private;
10681
	struct drm_i915_private *dev_priv = dev->dev_private;
9945
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
10682
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
9946
	DRM_INFO("applying inverted panel brightness quirk\n");
10683
	DRM_INFO("applying inverted panel brightness quirk\n");
9947
}
10684
}
9948
 
-
 
9949
/*
-
 
9950
 * Some machines (Dell XPS13) suffer broken backlight controls if
-
 
9951
 * BLM_PCH_PWM_ENABLE is set.
-
 
9952
 */
-
 
9953
static void quirk_no_pcm_pwm_enable(struct drm_device *dev)
-
 
9954
{
-
 
9955
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
9956
	dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
-
 
9957
	DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
-
 
9958
}
-
 
9959
 
10685
 
9960
struct intel_quirk {
10686
struct intel_quirk {
9961
	int device;
10687
	int device;
9962
	int subsystem_vendor;
10688
	int subsystem_vendor;
9963
	int subsystem_device;
10689
	int subsystem_device;
9964
	void (*hook)(struct drm_device *dev);
10690
	void (*hook)(struct drm_device *dev);
9965
};
10691
};
9966
 
10692
 
9967
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
10693
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
9968
struct intel_dmi_quirk {
10694
struct intel_dmi_quirk {
9969
	void (*hook)(struct drm_device *dev);
10695
	void (*hook)(struct drm_device *dev);
9970
	const struct dmi_system_id (*dmi_id_list)[];
10696
	const struct dmi_system_id (*dmi_id_list)[];
9971
};
10697
};
9972
 
10698
 
9973
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
10699
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
9974
{
10700
{
9975
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
10701
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
9976
	return 1;
10702
	return 1;
9977
}
10703
}
9978
 
10704
 
9979
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
10705
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
9980
	{
10706
	{
9981
		.dmi_id_list = &(const struct dmi_system_id[]) {
10707
		.dmi_id_list = &(const struct dmi_system_id[]) {
9982
			{
10708
			{
9983
				.callback = intel_dmi_reverse_brightness,
10709
				.callback = intel_dmi_reverse_brightness,
9984
				.ident = "NCR Corporation",
10710
				.ident = "NCR Corporation",
9985
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
10711
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
9986
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
10712
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
9987
				},
10713
				},
9988
			},
10714
			},
9989
			{ }  /* terminating entry */
10715
			{ }  /* terminating entry */
9990
		},
10716
		},
9991
		.hook = quirk_invert_brightness,
10717
		.hook = quirk_invert_brightness,
9992
	},
10718
	},
9993
};
10719
};
9994
 
10720
 
9995
static struct intel_quirk intel_quirks[] = {
10721
static struct intel_quirk intel_quirks[] = {
9996
	/* HP Mini needs pipe A force quirk (LP: #322104) */
10722
	/* HP Mini needs pipe A force quirk (LP: #322104) */
9997
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
10723
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
9998
 
10724
 
9999
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
10725
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
10000
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
10726
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
10001
 
10727
 
10002
	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
10728
	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
10003
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
10729
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
10004
 
10730
 
10005
	/* 830/845 need to leave pipe A & dpll A up */
-
 
10006
	{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
10731
	/* 830 needs to leave pipe A & dpll A up */
10007
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
10732
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
10008
 
10733
 
10009
	/* Lenovo U160 cannot use SSC on LVDS */
10734
	/* Lenovo U160 cannot use SSC on LVDS */
10010
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
10735
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
10011
 
10736
 
10012
	/* Sony Vaio Y cannot use SSC on LVDS */
10737
	/* Sony Vaio Y cannot use SSC on LVDS */
10013
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
10738
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
10014
 
10739
 
10015
	/* Acer Aspire 5734Z must invert backlight brightness */
10740
	/* Acer Aspire 5734Z must invert backlight brightness */
10016
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
10741
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
10017
 
10742
 
10018
	/* Acer/eMachines G725 */
10743
	/* Acer/eMachines G725 */
10019
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
10744
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
10020
 
10745
 
10021
	/* Acer/eMachines e725 */
10746
	/* Acer/eMachines e725 */
10022
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
10747
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
10023
 
10748
 
10024
	/* Acer/Packard Bell NCL20 */
10749
	/* Acer/Packard Bell NCL20 */
10025
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
10750
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
10026
 
10751
 
10027
	/* Acer Aspire 4736Z */
10752
	/* Acer Aspire 4736Z */
10028
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
10753
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
10029
 
-
 
10030
	/* Dell XPS13 HD Sandy Bridge */
-
 
10031
	{ 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
-
 
10032
	/* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
-
 
10033
	{ 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable },
-
 
10034
};
10754
};
10035
 
10755
 
10036
static void intel_init_quirks(struct drm_device *dev)
10756
static void intel_init_quirks(struct drm_device *dev)
10037
{
10757
{
10038
	struct pci_dev *d = dev->pdev;
10758
	struct pci_dev *d = dev->pdev;
10039
	int i;
10759
	int i;
10040
 
10760
 
10041
	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
10761
	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
10042
		struct intel_quirk *q = &intel_quirks[i];
10762
		struct intel_quirk *q = &intel_quirks[i];
10043
 
10763
 
10044
		if (d->device == q->device &&
10764
		if (d->device == q->device &&
10045
		    (d->subsystem_vendor == q->subsystem_vendor ||
10765
		    (d->subsystem_vendor == q->subsystem_vendor ||
10046
		     q->subsystem_vendor == PCI_ANY_ID) &&
10766
		     q->subsystem_vendor == PCI_ANY_ID) &&
10047
		    (d->subsystem_device == q->subsystem_device ||
10767
		    (d->subsystem_device == q->subsystem_device ||
10048
		     q->subsystem_device == PCI_ANY_ID))
10768
		     q->subsystem_device == PCI_ANY_ID))
10049
			q->hook(dev);
10769
			q->hook(dev);
10050
	}
10770
	}
10051
}
10771
}
10052
 
10772
 
10053
/* Disable the VGA plane that we never use */
10773
/* Disable the VGA plane that we never use */
10054
static void i915_disable_vga(struct drm_device *dev)
10774
static void i915_disable_vga(struct drm_device *dev)
10055
{
10775
{
10056
	struct drm_i915_private *dev_priv = dev->dev_private;
10776
	struct drm_i915_private *dev_priv = dev->dev_private;
10057
	u8 sr1;
10777
	u8 sr1;
10058
	u32 vga_reg = i915_vgacntrl_reg(dev);
10778
	u32 vga_reg = i915_vgacntrl_reg(dev);
10059
 
10779
 
10060
//   vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
10780
//	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
10061
    out8(SR01, VGA_SR_INDEX);
10781
	outb(SR01, VGA_SR_INDEX);
10062
    sr1 = in8(VGA_SR_DATA);
10782
	sr1 = inb(VGA_SR_DATA);
10063
    out8(sr1 | 1<<5, VGA_SR_DATA);
10783
	outb(sr1 | 1<<5, VGA_SR_DATA);
10064
//   vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
10784
//	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
10065
	udelay(300);
10785
	udelay(300);
10066
 
10786
 
10067
	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
10787
	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
10068
	POSTING_READ(vga_reg);
10788
	POSTING_READ(vga_reg);
10069
}
10789
}
10070
 
10790
 
10071
void intel_modeset_init_hw(struct drm_device *dev)
10791
void intel_modeset_init_hw(struct drm_device *dev)
10072
{
10792
{
10073
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
10074
 
-
 
10075
	intel_init_power_well(dev);
-
 
10076
 
-
 
10077
	intel_prepare_ddi(dev);
10793
	intel_prepare_ddi(dev);
10078
 
10794
 
10079
	intel_init_clock_gating(dev);
10795
	intel_init_clock_gating(dev);
10080
 
-
 
10081
	/* Enable the CRI clock source so we can get at the display */
10796
 
10082
	if (IS_VALLEYVIEW(dev))
-
 
10083
		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
-
 
10084
			   DPLL_INTEGRATED_CRI_CLK_VLV);
10797
	intel_reset_dpio(dev);
10085
 
10798
 
10086
    mutex_lock(&dev->struct_mutex);
10799
    mutex_lock(&dev->struct_mutex);
10087
    intel_enable_gt_powersave(dev);
10800
    intel_enable_gt_powersave(dev);
10088
    mutex_unlock(&dev->struct_mutex);
10801
    mutex_unlock(&dev->struct_mutex);
10089
}
10802
}
10090
 
10803
 
10091
void intel_modeset_suspend_hw(struct drm_device *dev)
10804
void intel_modeset_suspend_hw(struct drm_device *dev)
10092
{
10805
{
10093
	intel_suspend_hw(dev);
10806
	intel_suspend_hw(dev);
10094
}
10807
}
10095
 
10808
 
10096
void intel_modeset_init(struct drm_device *dev)
10809
void intel_modeset_init(struct drm_device *dev)
10097
{
10810
{
10098
	struct drm_i915_private *dev_priv = dev->dev_private;
10811
	struct drm_i915_private *dev_priv = dev->dev_private;
10099
	int i, j, ret;
10812
	int i, j, ret;
10100
 
10813
 
10101
	drm_mode_config_init(dev);
10814
	drm_mode_config_init(dev);
10102
 
10815
 
10103
	dev->mode_config.min_width = 0;
10816
	dev->mode_config.min_width = 0;
10104
	dev->mode_config.min_height = 0;
10817
	dev->mode_config.min_height = 0;
10105
 
10818
 
10106
	dev->mode_config.preferred_depth = 24;
10819
	dev->mode_config.preferred_depth = 24;
10107
	dev->mode_config.prefer_shadow = 1;
10820
	dev->mode_config.prefer_shadow = 1;
10108
 
10821
 
10109
	dev->mode_config.funcs = &intel_mode_funcs;
10822
	dev->mode_config.funcs = &intel_mode_funcs;
10110
 
10823
 
10111
	intel_init_quirks(dev);
10824
	intel_init_quirks(dev);
10112
 
10825
 
10113
	intel_init_pm(dev);
10826
	intel_init_pm(dev);
10114
 
10827
 
10115
	if (INTEL_INFO(dev)->num_pipes == 0)
10828
	if (INTEL_INFO(dev)->num_pipes == 0)
10116
		return;
10829
		return;
10117
 
10830
 
10118
	intel_init_display(dev);
10831
	intel_init_display(dev);
10119
 
10832
 
10120
	if (IS_GEN2(dev)) {
10833
	if (IS_GEN2(dev)) {
10121
		dev->mode_config.max_width = 2048;
10834
		dev->mode_config.max_width = 2048;
10122
		dev->mode_config.max_height = 2048;
10835
		dev->mode_config.max_height = 2048;
10123
	} else if (IS_GEN3(dev)) {
10836
	} else if (IS_GEN3(dev)) {
10124
		dev->mode_config.max_width = 4096;
10837
		dev->mode_config.max_width = 4096;
10125
		dev->mode_config.max_height = 4096;
10838
		dev->mode_config.max_height = 4096;
10126
	} else {
10839
	} else {
10127
		dev->mode_config.max_width = 8192;
10840
		dev->mode_config.max_width = 8192;
10128
		dev->mode_config.max_height = 8192;
10841
		dev->mode_config.max_height = 8192;
10129
	}
10842
	}
10130
	dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
10843
	dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
10131
 
10844
 
10132
	DRM_DEBUG_KMS("%d display pipe%s available.\n",
10845
	DRM_DEBUG_KMS("%d display pipe%s available.\n",
10133
		      INTEL_INFO(dev)->num_pipes,
10846
		      INTEL_INFO(dev)->num_pipes,
10134
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
10847
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
10135
 
10848
 
10136
	for_each_pipe(i) {
10849
	for_each_pipe(i) {
10137
		intel_crtc_init(dev, i);
10850
		intel_crtc_init(dev, i);
10138
		for (j = 0; j < dev_priv->num_plane; j++) {
10851
		for (j = 0; j < dev_priv->num_plane; j++) {
10139
			ret = intel_plane_init(dev, i, j);
10852
			ret = intel_plane_init(dev, i, j);
10140
		if (ret)
10853
		if (ret)
10141
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
10854
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
10142
					      pipe_name(i), sprite_name(i, j), ret);
10855
					      pipe_name(i), sprite_name(i, j), ret);
10143
		}
10856
		}
10144
	}
10857
	}
-
 
10858
 
-
 
10859
	intel_init_dpio(dev);
-
 
10860
	intel_reset_dpio(dev);
10145
 
10861
 
10146
	intel_cpu_pll_init(dev);
10862
	intel_cpu_pll_init(dev);
10147
	intel_shared_dpll_init(dev);
10863
	intel_shared_dpll_init(dev);
10148
 
10864
 
10149
	/* Just disable it once at startup */
10865
	/* Just disable it once at startup */
10150
	i915_disable_vga(dev);
10866
	i915_disable_vga(dev);
10151
	intel_setup_outputs(dev);
10867
	intel_setup_outputs(dev);
10152
 
10868
 
10153
	/* Just in case the BIOS is doing something questionable. */
10869
	/* Just in case the BIOS is doing something questionable. */
10154
	intel_disable_fbc(dev);
10870
	intel_disable_fbc(dev);
10155
}
10871
}
10156
 
10872
 
10157
static void
10873
static void
10158
intel_connector_break_all_links(struct intel_connector *connector)
10874
intel_connector_break_all_links(struct intel_connector *connector)
10159
{
10875
{
10160
	connector->base.dpms = DRM_MODE_DPMS_OFF;
10876
	connector->base.dpms = DRM_MODE_DPMS_OFF;
10161
	connector->base.encoder = NULL;
10877
	connector->base.encoder = NULL;
10162
	connector->encoder->connectors_active = false;
10878
	connector->encoder->connectors_active = false;
10163
	connector->encoder->base.crtc = NULL;
10879
	connector->encoder->base.crtc = NULL;
10164
}
10880
}
10165
 
10881
 
10166
static void intel_enable_pipe_a(struct drm_device *dev)
10882
static void intel_enable_pipe_a(struct drm_device *dev)
10167
{
10883
{
10168
	struct intel_connector *connector;
10884
	struct intel_connector *connector;
10169
	struct drm_connector *crt = NULL;
10885
	struct drm_connector *crt = NULL;
10170
	struct intel_load_detect_pipe load_detect_temp;
10886
	struct intel_load_detect_pipe load_detect_temp;
10171
 
10887
 
10172
	/* We can't just switch on the pipe A, we need to set things up with a
10888
	/* We can't just switch on the pipe A, we need to set things up with a
10173
	 * proper mode and output configuration. As a gross hack, enable pipe A
10889
	 * proper mode and output configuration. As a gross hack, enable pipe A
10174
	 * by enabling the load detect pipe once. */
10890
	 * by enabling the load detect pipe once. */
10175
	list_for_each_entry(connector,
10891
	list_for_each_entry(connector,
10176
			    &dev->mode_config.connector_list,
10892
			    &dev->mode_config.connector_list,
10177
			    base.head) {
10893
			    base.head) {
10178
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
10894
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
10179
			crt = &connector->base;
10895
			crt = &connector->base;
10180
			break;
10896
			break;
10181
		}
10897
		}
10182
	}
10898
	}
10183
 
10899
 
10184
	if (!crt)
10900
	if (!crt)
10185
		return;
10901
		return;
10186
 
10902
 
10187
	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp))
10903
	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp))
10188
		intel_release_load_detect_pipe(crt, &load_detect_temp);
10904
		intel_release_load_detect_pipe(crt, &load_detect_temp);
10189
 
10905
 
10190
 
10906
 
10191
}
10907
}
10192
 
10908
 
10193
static bool
10909
static bool
10194
intel_check_plane_mapping(struct intel_crtc *crtc)
10910
intel_check_plane_mapping(struct intel_crtc *crtc)
10195
{
10911
{
10196
	struct drm_device *dev = crtc->base.dev;
10912
	struct drm_device *dev = crtc->base.dev;
10197
	struct drm_i915_private *dev_priv = dev->dev_private;
10913
	struct drm_i915_private *dev_priv = dev->dev_private;
10198
	u32 reg, val;
10914
	u32 reg, val;
10199
 
10915
 
10200
	if (INTEL_INFO(dev)->num_pipes == 1)
10916
	if (INTEL_INFO(dev)->num_pipes == 1)
10201
		return true;
10917
		return true;
10202
 
10918
 
10203
	reg = DSPCNTR(!crtc->plane);
10919
	reg = DSPCNTR(!crtc->plane);
10204
	val = I915_READ(reg);
10920
	val = I915_READ(reg);
10205
 
10921
 
10206
	if ((val & DISPLAY_PLANE_ENABLE) &&
10922
	if ((val & DISPLAY_PLANE_ENABLE) &&
10207
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
10923
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
10208
		return false;
10924
		return false;
10209
 
10925
 
10210
	return true;
10926
	return true;
10211
}
10927
}
10212
 
10928
 
10213
static void intel_sanitize_crtc(struct intel_crtc *crtc)
10929
static void intel_sanitize_crtc(struct intel_crtc *crtc)
10214
{
10930
{
10215
	struct drm_device *dev = crtc->base.dev;
10931
	struct drm_device *dev = crtc->base.dev;
10216
	struct drm_i915_private *dev_priv = dev->dev_private;
10932
	struct drm_i915_private *dev_priv = dev->dev_private;
10217
	u32 reg;
10933
	u32 reg;
10218
 
10934
 
10219
	/* Clear any frame start delays used for debugging left by the BIOS */
10935
	/* Clear any frame start delays used for debugging left by the BIOS */
10220
	reg = PIPECONF(crtc->config.cpu_transcoder);
10936
	reg = PIPECONF(crtc->config.cpu_transcoder);
10221
	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
10937
	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
10222
 
10938
 
10223
	/* We need to sanitize the plane -> pipe mapping first because this will
10939
	/* We need to sanitize the plane -> pipe mapping first because this will
10224
	 * disable the crtc (and hence change the state) if it is wrong. Note
10940
	 * disable the crtc (and hence change the state) if it is wrong. Note
10225
	 * that gen4+ has a fixed plane -> pipe mapping.  */
10941
	 * that gen4+ has a fixed plane -> pipe mapping.  */
10226
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
10942
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
10227
		struct intel_connector *connector;
10943
		struct intel_connector *connector;
10228
		bool plane;
10944
		bool plane;
10229
 
10945
 
10230
		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
10946
		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
10231
			      crtc->base.base.id);
10947
			      crtc->base.base.id);
10232
 
10948
 
10233
		/* Pipe has the wrong plane attached and the plane is active.
10949
		/* Pipe has the wrong plane attached and the plane is active.
10234
		 * Temporarily change the plane mapping and disable everything
10950
		 * Temporarily change the plane mapping and disable everything
10235
		 * ...  */
10951
		 * ...  */
10236
		plane = crtc->plane;
10952
		plane = crtc->plane;
10237
		crtc->plane = !plane;
10953
		crtc->plane = !plane;
10238
		dev_priv->display.crtc_disable(&crtc->base);
10954
		dev_priv->display.crtc_disable(&crtc->base);
10239
		crtc->plane = plane;
10955
		crtc->plane = plane;
10240
 
10956
 
10241
		/* ... and break all links. */
10957
		/* ... and break all links. */
10242
		list_for_each_entry(connector, &dev->mode_config.connector_list,
10958
		list_for_each_entry(connector, &dev->mode_config.connector_list,
10243
				    base.head) {
10959
				    base.head) {
10244
			if (connector->encoder->base.crtc != &crtc->base)
10960
			if (connector->encoder->base.crtc != &crtc->base)
10245
				continue;
10961
				continue;
10246
 
10962
 
10247
			intel_connector_break_all_links(connector);
10963
			intel_connector_break_all_links(connector);
10248
		}
10964
		}
10249
 
10965
 
10250
		WARN_ON(crtc->active);
10966
		WARN_ON(crtc->active);
10251
		crtc->base.enabled = false;
10967
		crtc->base.enabled = false;
10252
	}
10968
	}
10253
 
10969
 
10254
	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
10970
	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
10255
	    crtc->pipe == PIPE_A && !crtc->active) {
10971
	    crtc->pipe == PIPE_A && !crtc->active) {
10256
		/* BIOS forgot to enable pipe A, this mostly happens after
10972
		/* BIOS forgot to enable pipe A, this mostly happens after
10257
		 * resume. Force-enable the pipe to fix this, the update_dpms
10973
		 * resume. Force-enable the pipe to fix this, the update_dpms
10258
		 * call below we restore the pipe to the right state, but leave
10974
		 * call below we restore the pipe to the right state, but leave
10259
		 * the required bits on. */
10975
		 * the required bits on. */
10260
		intel_enable_pipe_a(dev);
10976
		intel_enable_pipe_a(dev);
10261
	}
10977
	}
10262
 
10978
 
10263
	/* Adjust the state of the output pipe according to whether we
10979
	/* Adjust the state of the output pipe according to whether we
10264
	 * have active connectors/encoders. */
10980
	 * have active connectors/encoders. */
10265
	intel_crtc_update_dpms(&crtc->base);
10981
	intel_crtc_update_dpms(&crtc->base);
10266
 
10982
 
10267
	if (crtc->active != crtc->base.enabled) {
10983
	if (crtc->active != crtc->base.enabled) {
10268
		struct intel_encoder *encoder;
10984
		struct intel_encoder *encoder;
10269
 
10985
 
10270
		/* This can happen either due to bugs in the get_hw_state
10986
		/* This can happen either due to bugs in the get_hw_state
10271
		 * functions or because the pipe is force-enabled due to the
10987
		 * functions or because the pipe is force-enabled due to the
10272
		 * pipe A quirk. */
10988
		 * pipe A quirk. */
10273
		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
10989
		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
10274
			      crtc->base.base.id,
10990
			      crtc->base.base.id,
10275
			      crtc->base.enabled ? "enabled" : "disabled",
10991
			      crtc->base.enabled ? "enabled" : "disabled",
10276
			      crtc->active ? "enabled" : "disabled");
10992
			      crtc->active ? "enabled" : "disabled");
10277
 
10993
 
10278
		crtc->base.enabled = crtc->active;
10994
		crtc->base.enabled = crtc->active;
10279
 
10995
 
10280
		/* Because we only establish the connector -> encoder ->
10996
		/* Because we only establish the connector -> encoder ->
10281
		 * crtc links if something is active, this means the
10997
		 * crtc links if something is active, this means the
10282
		 * crtc is now deactivated. Break the links. connector
10998
		 * crtc is now deactivated. Break the links. connector
10283
		 * -> encoder links are only establish when things are
10999
		 * -> encoder links are only establish when things are
10284
		 *  actually up, hence no need to break them. */
11000
		 *  actually up, hence no need to break them. */
10285
		WARN_ON(crtc->active);
11001
		WARN_ON(crtc->active);
10286
 
11002
 
10287
		for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
11003
		for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
10288
			WARN_ON(encoder->connectors_active);
11004
			WARN_ON(encoder->connectors_active);
10289
			encoder->base.crtc = NULL;
11005
			encoder->base.crtc = NULL;
10290
		}
11006
		}
10291
	}
11007
	}
10292
}
11008
}
10293
 
11009
 
10294
static void intel_sanitize_encoder(struct intel_encoder *encoder)
11010
static void intel_sanitize_encoder(struct intel_encoder *encoder)
10295
{
11011
{
10296
	struct intel_connector *connector;
11012
	struct intel_connector *connector;
10297
	struct drm_device *dev = encoder->base.dev;
11013
	struct drm_device *dev = encoder->base.dev;
10298
 
11014
 
10299
	/* We need to check both for a crtc link (meaning that the
11015
	/* We need to check both for a crtc link (meaning that the
10300
	 * encoder is active and trying to read from a pipe) and the
11016
	 * encoder is active and trying to read from a pipe) and the
10301
	 * pipe itself being active. */
11017
	 * pipe itself being active. */
10302
	bool has_active_crtc = encoder->base.crtc &&
11018
	bool has_active_crtc = encoder->base.crtc &&
10303
		to_intel_crtc(encoder->base.crtc)->active;
11019
		to_intel_crtc(encoder->base.crtc)->active;
10304
 
11020
 
10305
	if (encoder->connectors_active && !has_active_crtc) {
11021
	if (encoder->connectors_active && !has_active_crtc) {
10306
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
11022
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
10307
			      encoder->base.base.id,
11023
			      encoder->base.base.id,
10308
			      drm_get_encoder_name(&encoder->base));
11024
			      drm_get_encoder_name(&encoder->base));
10309
 
11025
 
10310
		/* Connector is active, but has no active pipe. This is
11026
		/* Connector is active, but has no active pipe. This is
10311
		 * fallout from our resume register restoring. Disable
11027
		 * fallout from our resume register restoring. Disable
10312
		 * the encoder manually again. */
11028
		 * the encoder manually again. */
10313
		if (encoder->base.crtc) {
11029
		if (encoder->base.crtc) {
10314
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
11030
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
10315
				      encoder->base.base.id,
11031
				      encoder->base.base.id,
10316
				      drm_get_encoder_name(&encoder->base));
11032
				      drm_get_encoder_name(&encoder->base));
10317
			encoder->disable(encoder);
11033
			encoder->disable(encoder);
10318
		}
11034
		}
10319
 
11035
 
10320
		/* Inconsistent output/port/pipe state happens presumably due to
11036
		/* Inconsistent output/port/pipe state happens presumably due to
10321
		 * a bug in one of the get_hw_state functions. Or someplace else
11037
		 * a bug in one of the get_hw_state functions. Or someplace else
10322
		 * in our code, like the register restore mess on resume. Clamp
11038
		 * in our code, like the register restore mess on resume. Clamp
10323
		 * things to off as a safer default. */
11039
		 * things to off as a safer default. */
10324
		list_for_each_entry(connector,
11040
		list_for_each_entry(connector,
10325
				    &dev->mode_config.connector_list,
11041
				    &dev->mode_config.connector_list,
10326
				    base.head) {
11042
				    base.head) {
10327
			if (connector->encoder != encoder)
11043
			if (connector->encoder != encoder)
10328
				continue;
11044
				continue;
10329
 
11045
 
10330
			intel_connector_break_all_links(connector);
11046
			intel_connector_break_all_links(connector);
10331
		}
11047
		}
10332
	}
11048
	}
10333
	/* Enabled encoders without active connectors will be fixed in
11049
	/* Enabled encoders without active connectors will be fixed in
10334
	 * the crtc fixup. */
11050
	 * the crtc fixup. */
10335
}
11051
}
10336
 
11052
 
10337
void i915_redisable_vga(struct drm_device *dev)
11053
void i915_redisable_vga(struct drm_device *dev)
10338
{
11054
{
10339
	struct drm_i915_private *dev_priv = dev->dev_private;
11055
	struct drm_i915_private *dev_priv = dev->dev_private;
10340
	u32 vga_reg = i915_vgacntrl_reg(dev);
11056
	u32 vga_reg = i915_vgacntrl_reg(dev);
10341
 
11057
 
10342
	/* This function can be called both from intel_modeset_setup_hw_state or
11058
	/* This function can be called both from intel_modeset_setup_hw_state or
10343
	 * at a very early point in our resume sequence, where the power well
11059
	 * at a very early point in our resume sequence, where the power well
10344
	 * structures are not yet restored. Since this function is at a very
11060
	 * structures are not yet restored. Since this function is at a very
10345
	 * paranoid "someone might have enabled VGA while we were not looking"
11061
	 * paranoid "someone might have enabled VGA while we were not looking"
10346
	 * level, just check if the power well is enabled instead of trying to
11062
	 * level, just check if the power well is enabled instead of trying to
10347
	 * follow the "don't touch the power well if we don't need it" policy
11063
	 * follow the "don't touch the power well if we don't need it" policy
10348
	 * the rest of the driver uses. */
11064
	 * the rest of the driver uses. */
10349
	if (HAS_POWER_WELL(dev) &&
11065
	if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
10350
	    (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
11066
	    (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
10351
		return;
11067
		return;
10352
 
11068
 
10353
	if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
11069
	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
10354
		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
11070
		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
10355
		i915_disable_vga(dev);
11071
		i915_disable_vga(dev);
10356
	}
11072
	}
10357
}
11073
}
10358
 
11074
 
10359
static void intel_modeset_readout_hw_state(struct drm_device *dev)
11075
static void intel_modeset_readout_hw_state(struct drm_device *dev)
10360
{
11076
{
10361
	struct drm_i915_private *dev_priv = dev->dev_private;
11077
	struct drm_i915_private *dev_priv = dev->dev_private;
10362
	enum pipe pipe;
11078
	enum pipe pipe;
10363
	struct intel_crtc *crtc;
11079
	struct intel_crtc *crtc;
10364
	struct intel_encoder *encoder;
11080
	struct intel_encoder *encoder;
10365
	struct intel_connector *connector;
11081
	struct intel_connector *connector;
10366
	int i;
11082
	int i;
10367
 
11083
 
10368
	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
11084
	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
10369
			    base.head) {
11085
			    base.head) {
10370
		memset(&crtc->config, 0, sizeof(crtc->config));
11086
		memset(&crtc->config, 0, sizeof(crtc->config));
10371
 
11087
 
10372
		crtc->active = dev_priv->display.get_pipe_config(crtc,
11088
		crtc->active = dev_priv->display.get_pipe_config(crtc,
10373
								 &crtc->config);
11089
								 &crtc->config);
10374
 
11090
 
10375
		crtc->base.enabled = crtc->active;
11091
		crtc->base.enabled = crtc->active;
-
 
11092
		crtc->primary_enabled = crtc->active;
10376
 
11093
 
10377
		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
11094
		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
10378
			      crtc->base.base.id,
11095
			      crtc->base.base.id,
10379
			      crtc->active ? "enabled" : "disabled");
11096
			      crtc->active ? "enabled" : "disabled");
10380
	}
11097
	}
10381
 
11098
 
10382
	/* FIXME: Smash this into the new shared dpll infrastructure. */
11099
	/* FIXME: Smash this into the new shared dpll infrastructure. */
10383
	if (HAS_DDI(dev))
11100
	if (HAS_DDI(dev))
10384
		intel_ddi_setup_hw_pll_state(dev);
11101
		intel_ddi_setup_hw_pll_state(dev);
10385
 
11102
 
10386
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
11103
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
10387
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
11104
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
10388
 
11105
 
10389
		pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state);
11106
		pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state);
10390
		pll->active = 0;
11107
		pll->active = 0;
10391
		list_for_each_entry(crtc, &dev->mode_config.crtc_list,
11108
		list_for_each_entry(crtc, &dev->mode_config.crtc_list,
10392
				    base.head) {
11109
				    base.head) {
10393
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
11110
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
10394
				pll->active++;
11111
				pll->active++;
10395
		}
11112
		}
10396
		pll->refcount = pll->active;
11113
		pll->refcount = pll->active;
10397
 
11114
 
10398
		DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
11115
		DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
10399
			      pll->name, pll->refcount, pll->on);
11116
			      pll->name, pll->refcount, pll->on);
10400
	}
11117
	}
10401
 
11118
 
10402
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
11119
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
10403
			    base.head) {
11120
			    base.head) {
10404
		pipe = 0;
11121
		pipe = 0;
10405
 
11122
 
10406
		if (encoder->get_hw_state(encoder, &pipe)) {
11123
		if (encoder->get_hw_state(encoder, &pipe)) {
10407
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
11124
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
10408
			encoder->base.crtc = &crtc->base;
11125
			encoder->base.crtc = &crtc->base;
10409
			if (encoder->get_config)
-
 
10410
				encoder->get_config(encoder, &crtc->config);
11126
				encoder->get_config(encoder, &crtc->config);
10411
		} else {
11127
		} else {
10412
			encoder->base.crtc = NULL;
11128
			encoder->base.crtc = NULL;
10413
		}
11129
		}
10414
 
11130
 
10415
		encoder->connectors_active = false;
11131
		encoder->connectors_active = false;
10416
		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n",
11132
		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
10417
			      encoder->base.base.id,
11133
			      encoder->base.base.id,
10418
			      drm_get_encoder_name(&encoder->base),
11134
			      drm_get_encoder_name(&encoder->base),
10419
			      encoder->base.crtc ? "enabled" : "disabled",
11135
			      encoder->base.crtc ? "enabled" : "disabled",
10420
			      pipe);
11136
			      pipe_name(pipe));
10421
	}
-
 
10422
 
-
 
10423
	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
-
 
10424
			    base.head) {
-
 
10425
		if (!crtc->active)
-
 
10426
			continue;
-
 
10427
		if (dev_priv->display.get_clock)
-
 
10428
			dev_priv->display.get_clock(crtc,
-
 
10429
						    &crtc->config);
-
 
10430
	}
11137
	}
10431
 
11138
 
10432
	list_for_each_entry(connector, &dev->mode_config.connector_list,
11139
	list_for_each_entry(connector, &dev->mode_config.connector_list,
10433
			    base.head) {
11140
			    base.head) {
10434
		if (connector->get_hw_state(connector)) {
11141
		if (connector->get_hw_state(connector)) {
10435
			connector->base.dpms = DRM_MODE_DPMS_ON;
11142
			connector->base.dpms = DRM_MODE_DPMS_ON;
10436
			connector->encoder->connectors_active = true;
11143
			connector->encoder->connectors_active = true;
10437
			connector->base.encoder = &connector->encoder->base;
11144
			connector->base.encoder = &connector->encoder->base;
10438
		} else {
11145
		} else {
10439
			connector->base.dpms = DRM_MODE_DPMS_OFF;
11146
			connector->base.dpms = DRM_MODE_DPMS_OFF;
10440
			connector->base.encoder = NULL;
11147
			connector->base.encoder = NULL;
10441
		}
11148
		}
10442
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
11149
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
10443
			      connector->base.base.id,
11150
			      connector->base.base.id,
10444
			      drm_get_connector_name(&connector->base),
11151
			      drm_get_connector_name(&connector->base),
10445
			      connector->base.encoder ? "enabled" : "disabled");
11152
			      connector->base.encoder ? "enabled" : "disabled");
10446
	}
11153
	}
10447
}
11154
}
10448
 
11155
 
10449
/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
11156
/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
10450
 * and i915 state tracking structures. */
11157
 * and i915 state tracking structures. */
10451
void intel_modeset_setup_hw_state(struct drm_device *dev,
11158
void intel_modeset_setup_hw_state(struct drm_device *dev,
10452
				  bool force_restore)
11159
				  bool force_restore)
10453
{
11160
{
10454
	struct drm_i915_private *dev_priv = dev->dev_private;
11161
	struct drm_i915_private *dev_priv = dev->dev_private;
10455
	enum pipe pipe;
11162
	enum pipe pipe;
10456
	struct drm_plane *plane;
-
 
10457
	struct intel_crtc *crtc;
11163
	struct intel_crtc *crtc;
10458
	struct intel_encoder *encoder;
11164
	struct intel_encoder *encoder;
10459
	int i;
11165
	int i;
10460
 
11166
 
10461
	intel_modeset_readout_hw_state(dev);
11167
	intel_modeset_readout_hw_state(dev);
10462
 
11168
 
10463
	/*
11169
	/*
10464
	 * Now that we have the config, copy it to each CRTC struct
11170
	 * Now that we have the config, copy it to each CRTC struct
10465
	 * Note that this could go away if we move to using crtc_config
11171
	 * Note that this could go away if we move to using crtc_config
10466
	 * checking everywhere.
11172
	 * checking everywhere.
10467
	 */
11173
	 */
10468
	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
11174
	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
10469
			    base.head) {
11175
			    base.head) {
10470
		if (crtc->active && i915_fastboot) {
11176
		if (crtc->active && i915_fastboot) {
10471
			intel_crtc_mode_from_pipe_config(crtc, &crtc->config);
11177
			intel_crtc_mode_from_pipe_config(crtc, &crtc->config);
10472
 
11178
 
10473
			DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
11179
			DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
10474
				      crtc->base.base.id);
11180
				      crtc->base.base.id);
10475
			drm_mode_debug_printmodeline(&crtc->base.mode);
11181
			drm_mode_debug_printmodeline(&crtc->base.mode);
10476
		}
11182
		}
10477
	}
11183
	}
10478
 
11184
 
10479
	/* HW state is read out, now we need to sanitize this mess. */
11185
	/* HW state is read out, now we need to sanitize this mess. */
10480
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
11186
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
10481
			    base.head) {
11187
			    base.head) {
10482
		intel_sanitize_encoder(encoder);
11188
		intel_sanitize_encoder(encoder);
10483
	}
11189
	}
10484
 
11190
 
10485
	for_each_pipe(pipe) {
11191
	for_each_pipe(pipe) {
10486
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
11192
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
10487
		intel_sanitize_crtc(crtc);
11193
		intel_sanitize_crtc(crtc);
10488
		intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
11194
		intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
10489
	}
11195
	}
10490
 
11196
 
10491
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
11197
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
10492
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
11198
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
10493
 
11199
 
10494
		if (!pll->on || pll->active)
11200
		if (!pll->on || pll->active)
10495
			continue;
11201
			continue;
10496
 
11202
 
10497
		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
11203
		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
10498
 
11204
 
10499
		pll->disable(dev_priv, pll);
11205
		pll->disable(dev_priv, pll);
10500
		pll->on = false;
11206
		pll->on = false;
10501
	}
11207
	}
-
 
11208
 
-
 
11209
	if (HAS_PCH_SPLIT(dev))
-
 
11210
		ilk_wm_get_hw_state(dev);
10502
 
11211
 
-
 
11212
	if (force_restore) {
-
 
11213
		i915_redisable_vga(dev);
10503
	if (force_restore) {
11214
 
10504
		/*
11215
		/*
10505
		 * We need to use raw interfaces for restoring state to avoid
11216
		 * We need to use raw interfaces for restoring state to avoid
10506
		 * checking (bogus) intermediate states.
11217
		 * checking (bogus) intermediate states.
10507
		 */
11218
		 */
10508
		for_each_pipe(pipe) {
11219
		for_each_pipe(pipe) {
10509
			struct drm_crtc *crtc =
11220
			struct drm_crtc *crtc =
10510
				dev_priv->pipe_to_crtc_mapping[pipe];
11221
				dev_priv->pipe_to_crtc_mapping[pipe];
10511
 
11222
 
10512
			__intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
11223
			__intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
10513
					 crtc->fb);
11224
					 crtc->fb);
10514
		}
11225
		}
10515
		list_for_each_entry(plane, &dev->mode_config.plane_list, head)
-
 
10516
			intel_plane_restore(plane);
-
 
10517
 
-
 
10518
		i915_redisable_vga(dev);
-
 
10519
	} else {
11226
	} else {
10520
	intel_modeset_update_staged_output_state(dev);
11227
	intel_modeset_update_staged_output_state(dev);
10521
	}
11228
	}
10522
 
11229
 
10523
	intel_modeset_check_state(dev);
11230
	intel_modeset_check_state(dev);
10524
 
-
 
10525
	drm_mode_config_reset(dev);
-
 
10526
}
11231
}
10527
 
11232
 
10528
void intel_modeset_gem_init(struct drm_device *dev)
11233
void intel_modeset_gem_init(struct drm_device *dev)
10529
{
11234
{
10530
	intel_modeset_init_hw(dev);
11235
	intel_modeset_init_hw(dev);
10531
 
11236
 
10532
//   intel_setup_overlay(dev);
11237
//   intel_setup_overlay(dev);
10533
 
11238
 
10534
	mutex_lock(&dev->mode_config.mutex);
11239
	mutex_lock(&dev->mode_config.mutex);
-
 
11240
	drm_mode_config_reset(dev);
10535
	intel_modeset_setup_hw_state(dev, false);
11241
	intel_modeset_setup_hw_state(dev, false);
10536
	mutex_unlock(&dev->mode_config.mutex);
11242
	mutex_unlock(&dev->mode_config.mutex);
10537
}
11243
}
10538
 
11244
 
10539
void intel_modeset_cleanup(struct drm_device *dev)
11245
void intel_modeset_cleanup(struct drm_device *dev)
10540
{
11246
{
10541
#if 0
11247
#if 0
10542
	struct drm_i915_private *dev_priv = dev->dev_private;
11248
	struct drm_i915_private *dev_priv = dev->dev_private;
10543
	struct drm_crtc *crtc;
11249
	struct drm_crtc *crtc;
-
 
11250
	struct drm_connector *connector;
10544
 
11251
 
10545
	/*
11252
	/*
10546
	 * Interrupts and polling as the first thing to avoid creating havoc.
11253
	 * Interrupts and polling as the first thing to avoid creating havoc.
10547
	 * Too much stuff here (turning of rps, connectors, ...) would
11254
	 * Too much stuff here (turning of rps, connectors, ...) would
10548
	 * experience fancy races otherwise.
11255
	 * experience fancy races otherwise.
10549
	 */
11256
	 */
10550
	drm_irq_uninstall(dev);
11257
	drm_irq_uninstall(dev);
10551
	cancel_work_sync(&dev_priv->hotplug_work);
11258
	cancel_work_sync(&dev_priv->hotplug_work);
10552
	/*
11259
	/*
10553
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
11260
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
10554
	 * poll handlers. Hence disable polling after hpd handling is shut down.
11261
	 * poll handlers. Hence disable polling after hpd handling is shut down.
10555
	 */
11262
	 */
10556
//   drm_kms_helper_poll_fini(dev);
11263
	drm_kms_helper_poll_fini(dev);
10557
 
11264
 
10558
	mutex_lock(&dev->struct_mutex);
11265
	mutex_lock(&dev->struct_mutex);
10559
 
11266
 
10560
//   intel_unregister_dsm_handler();
11267
	intel_unregister_dsm_handler();
10561
 
11268
 
10562
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
11269
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10563
		/* Skip inactive CRTCs */
11270
		/* Skip inactive CRTCs */
10564
		if (!crtc->fb)
11271
		if (!crtc->fb)
10565
			continue;
11272
			continue;
10566
 
11273
 
10567
		intel_increase_pllclock(crtc);
11274
		intel_increase_pllclock(crtc);
10568
	}
11275
	}
10569
 
11276
 
10570
	intel_disable_fbc(dev);
11277
	intel_disable_fbc(dev);
10571
 
11278
 
10572
	intel_disable_gt_powersave(dev);
11279
	intel_disable_gt_powersave(dev);
10573
 
11280
 
10574
	ironlake_teardown_rc6(dev);
11281
	ironlake_teardown_rc6(dev);
10575
 
11282
 
10576
	mutex_unlock(&dev->struct_mutex);
11283
	mutex_unlock(&dev->struct_mutex);
10577
 
11284
 
10578
	/* flush any delayed tasks or pending work */
11285
	/* flush any delayed tasks or pending work */
10579
	flush_scheduled_work();
11286
	flush_scheduled_work();
10580
 
11287
 
-
 
11288
	/* destroy the backlight and sysfs files before encoders/connectors */
10581
	/* destroy backlight, if any, before the connectors */
11289
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-
 
11290
		intel_panel_destroy_backlight(connector);
-
 
11291
		drm_sysfs_connector_remove(connector);
10582
	intel_panel_destroy_backlight(dev);
11292
	}
10583
 
11293
 
10584
	drm_mode_config_cleanup(dev);
11294
	drm_mode_config_cleanup(dev);
10585
#endif
11295
#endif
10586
}
11296
}
10587
 
11297
 
10588
/*
11298
/*
10589
 * Return which encoder is currently attached for connector.
11299
 * Return which encoder is currently attached for connector.
10590
 */
11300
 */
10591
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
11301
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
10592
{
11302
{
10593
	return &intel_attached_encoder(connector)->base;
11303
	return &intel_attached_encoder(connector)->base;
10594
}
11304
}
10595
 
11305
 
10596
void intel_connector_attach_encoder(struct intel_connector *connector,
11306
void intel_connector_attach_encoder(struct intel_connector *connector,
10597
				    struct intel_encoder *encoder)
11307
				    struct intel_encoder *encoder)
10598
{
11308
{
10599
	connector->encoder = encoder;
11309
	connector->encoder = encoder;
10600
	drm_mode_connector_attach_encoder(&connector->base,
11310
	drm_mode_connector_attach_encoder(&connector->base,
10601
					  &encoder->base);
11311
					  &encoder->base);
10602
}
11312
}
10603
 
11313
 
10604
/*
11314
/*
10605
 * set vga decode state - true == enable VGA decode
11315
 * set vga decode state - true == enable VGA decode
10606
 */
11316
 */
10607
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
11317
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
10608
{
11318
{
10609
	struct drm_i915_private *dev_priv = dev->dev_private;
11319
	struct drm_i915_private *dev_priv = dev->dev_private;
10610
	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
11320
	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
10611
	u16 gmch_ctrl;
11321
	u16 gmch_ctrl;
10612
 
11322
 
10613
	pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
11323
	pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
10614
	if (state)
11324
	if (state)
10615
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
11325
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
10616
	else
11326
	else
10617
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
11327
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
10618
	pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
11328
	pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
10619
	return 0;
11329
	return 0;
10620
}
11330
}
10621
 
11331
 
10622
#ifdef CONFIG_DEBUG_FS
11332
#ifdef CONFIG_DEBUG_FS
10623
#include 
-
 
10624
 
11333
 
10625
struct intel_display_error_state {
11334
struct intel_display_error_state {
10626
 
11335
 
10627
	u32 power_well_driver;
11336
	u32 power_well_driver;
10628
 
11337
 
10629
	int num_transcoders;
11338
	int num_transcoders;
10630
 
11339
 
10631
	struct intel_cursor_error_state {
11340
	struct intel_cursor_error_state {
10632
		u32 control;
11341
		u32 control;
10633
		u32 position;
11342
		u32 position;
10634
		u32 base;
11343
		u32 base;
10635
		u32 size;
11344
		u32 size;
10636
	} cursor[I915_MAX_PIPES];
11345
	} cursor[I915_MAX_PIPES];
10637
 
11346
 
10638
	struct intel_pipe_error_state {
11347
	struct intel_pipe_error_state {
-
 
11348
		bool power_domain_on;
10639
		u32 source;
11349
		u32 source;
10640
	} pipe[I915_MAX_PIPES];
11350
	} pipe[I915_MAX_PIPES];
10641
 
11351
 
10642
	struct intel_plane_error_state {
11352
	struct intel_plane_error_state {
10643
		u32 control;
11353
		u32 control;
10644
		u32 stride;
11354
		u32 stride;
10645
		u32 size;
11355
		u32 size;
10646
		u32 pos;
11356
		u32 pos;
10647
		u32 addr;
11357
		u32 addr;
10648
		u32 surface;
11358
		u32 surface;
10649
		u32 tile_offset;
11359
		u32 tile_offset;
10650
	} plane[I915_MAX_PIPES];
11360
	} plane[I915_MAX_PIPES];
10651
 
11361
 
10652
	struct intel_transcoder_error_state {
11362
	struct intel_transcoder_error_state {
-
 
11363
		bool power_domain_on;
10653
		enum transcoder cpu_transcoder;
11364
		enum transcoder cpu_transcoder;
10654
 
11365
 
10655
		u32 conf;
11366
		u32 conf;
10656
 
11367
 
10657
		u32 htotal;
11368
		u32 htotal;
10658
		u32 hblank;
11369
		u32 hblank;
10659
		u32 hsync;
11370
		u32 hsync;
10660
		u32 vtotal;
11371
		u32 vtotal;
10661
		u32 vblank;
11372
		u32 vblank;
10662
		u32 vsync;
11373
		u32 vsync;
10663
	} transcoder[4];
11374
	} transcoder[4];
10664
};
11375
};
10665
 
11376
 
10666
struct intel_display_error_state *
11377
struct intel_display_error_state *
10667
intel_display_capture_error_state(struct drm_device *dev)
11378
intel_display_capture_error_state(struct drm_device *dev)
10668
{
11379
{
10669
	drm_i915_private_t *dev_priv = dev->dev_private;
11380
	drm_i915_private_t *dev_priv = dev->dev_private;
10670
	struct intel_display_error_state *error;
11381
	struct intel_display_error_state *error;
10671
	int transcoders[] = {
11382
	int transcoders[] = {
10672
		TRANSCODER_A,
11383
		TRANSCODER_A,
10673
		TRANSCODER_B,
11384
		TRANSCODER_B,
10674
		TRANSCODER_C,
11385
		TRANSCODER_C,
10675
		TRANSCODER_EDP,
11386
		TRANSCODER_EDP,
10676
	};
11387
	};
10677
	int i;
11388
	int i;
10678
 
11389
 
10679
	if (INTEL_INFO(dev)->num_pipes == 0)
11390
	if (INTEL_INFO(dev)->num_pipes == 0)
10680
		return NULL;
11391
		return NULL;
10681
 
11392
 
10682
	error = kmalloc(sizeof(*error), GFP_ATOMIC);
11393
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
10683
	if (error == NULL)
11394
	if (error == NULL)
10684
		return NULL;
11395
		return NULL;
10685
 
11396
 
10686
	if (HAS_POWER_WELL(dev))
11397
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
10687
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
11398
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
10688
 
11399
 
10689
	for_each_pipe(i) {
11400
	for_each_pipe(i) {
-
 
11401
		error->pipe[i].power_domain_on =
-
 
11402
			intel_display_power_enabled_sw(dev, POWER_DOMAIN_PIPE(i));
-
 
11403
		if (!error->pipe[i].power_domain_on)
-
 
11404
			continue;
-
 
11405
 
10690
		if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
11406
		if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
10691
		error->cursor[i].control = I915_READ(CURCNTR(i));
11407
		error->cursor[i].control = I915_READ(CURCNTR(i));
10692
		error->cursor[i].position = I915_READ(CURPOS(i));
11408
		error->cursor[i].position = I915_READ(CURPOS(i));
10693
		error->cursor[i].base = I915_READ(CURBASE(i));
11409
		error->cursor[i].base = I915_READ(CURBASE(i));
10694
		} else {
11410
		} else {
10695
			error->cursor[i].control = I915_READ(CURCNTR_IVB(i));
11411
			error->cursor[i].control = I915_READ(CURCNTR_IVB(i));
10696
			error->cursor[i].position = I915_READ(CURPOS_IVB(i));
11412
			error->cursor[i].position = I915_READ(CURPOS_IVB(i));
10697
			error->cursor[i].base = I915_READ(CURBASE_IVB(i));
11413
			error->cursor[i].base = I915_READ(CURBASE_IVB(i));
10698
		}
11414
		}
10699
 
11415
 
10700
		error->plane[i].control = I915_READ(DSPCNTR(i));
11416
		error->plane[i].control = I915_READ(DSPCNTR(i));
10701
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
11417
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
10702
		if (INTEL_INFO(dev)->gen <= 3) {
11418
		if (INTEL_INFO(dev)->gen <= 3) {
10703
		error->plane[i].size = I915_READ(DSPSIZE(i));
11419
		error->plane[i].size = I915_READ(DSPSIZE(i));
10704
		error->plane[i].pos = I915_READ(DSPPOS(i));
11420
		error->plane[i].pos = I915_READ(DSPPOS(i));
10705
		}
11421
		}
10706
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
11422
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
10707
		error->plane[i].addr = I915_READ(DSPADDR(i));
11423
		error->plane[i].addr = I915_READ(DSPADDR(i));
10708
		if (INTEL_INFO(dev)->gen >= 4) {
11424
		if (INTEL_INFO(dev)->gen >= 4) {
10709
			error->plane[i].surface = I915_READ(DSPSURF(i));
11425
			error->plane[i].surface = I915_READ(DSPSURF(i));
10710
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
11426
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
10711
		}
11427
		}
10712
 
11428
 
10713
		error->pipe[i].source = I915_READ(PIPESRC(i));
11429
		error->pipe[i].source = I915_READ(PIPESRC(i));
10714
	}
11430
	}
10715
 
11431
 
10716
	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
11432
	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
10717
	if (HAS_DDI(dev_priv->dev))
11433
	if (HAS_DDI(dev_priv->dev))
10718
		error->num_transcoders++; /* Account for eDP. */
11434
		error->num_transcoders++; /* Account for eDP. */
10719
 
11435
 
10720
	for (i = 0; i < error->num_transcoders; i++) {
11436
	for (i = 0; i < error->num_transcoders; i++) {
10721
		enum transcoder cpu_transcoder = transcoders[i];
11437
		enum transcoder cpu_transcoder = transcoders[i];
-
 
11438
 
-
 
11439
		error->transcoder[i].power_domain_on =
-
 
11440
			intel_display_power_enabled_sw(dev,
-
 
11441
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
-
 
11442
		if (!error->transcoder[i].power_domain_on)
-
 
11443
			continue;
10722
 
11444
 
10723
		error->transcoder[i].cpu_transcoder = cpu_transcoder;
11445
		error->transcoder[i].cpu_transcoder = cpu_transcoder;
10724
 
11446
 
10725
		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
11447
		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
10726
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
11448
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
10727
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
11449
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
10728
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
11450
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
10729
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
11451
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
10730
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
11452
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
10731
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
11453
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
10732
	}
11454
	}
10733
 
-
 
10734
	/* In the code above we read the registers without checking if the power
-
 
10735
	 * well was on, so here we have to clear the FPGA_DBG_RM_NOCLAIM bit to
-
 
10736
	 * prevent the next I915_WRITE from detecting it and printing an error
-
 
10737
	 * message. */
-
 
10738
	intel_uncore_clear_errors(dev);
-
 
10739
 
11455
 
10740
	return error;
11456
	return error;
10741
}
11457
}
10742
 
11458
 
10743
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
11459
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
10744
 
11460
 
10745
void
11461
void
10746
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
11462
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
10747
				struct drm_device *dev,
11463
				struct drm_device *dev,
10748
				struct intel_display_error_state *error)
11464
				struct intel_display_error_state *error)
10749
{
11465
{
10750
	int i;
11466
	int i;
10751
 
11467
 
10752
	if (!error)
11468
	if (!error)
10753
		return;
11469
		return;
10754
 
11470
 
10755
	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
11471
	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
10756
	if (HAS_POWER_WELL(dev))
11472
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
10757
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
11473
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
10758
			   error->power_well_driver);
11474
			   error->power_well_driver);
10759
	for_each_pipe(i) {
11475
	for_each_pipe(i) {
10760
		err_printf(m, "Pipe [%d]:\n", i);
11476
		err_printf(m, "Pipe [%d]:\n", i);
-
 
11477
		err_printf(m, "  Power: %s\n",
-
 
11478
			   error->pipe[i].power_domain_on ? "on" : "off");
10761
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
11479
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
10762
 
11480
 
10763
		err_printf(m, "Plane [%d]:\n", i);
11481
		err_printf(m, "Plane [%d]:\n", i);
10764
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
11482
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
10765
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
11483
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
10766
		if (INTEL_INFO(dev)->gen <= 3) {
11484
		if (INTEL_INFO(dev)->gen <= 3) {
10767
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
11485
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
10768
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
11486
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
10769
		}
11487
		}
10770
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
11488
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
10771
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
11489
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
10772
		if (INTEL_INFO(dev)->gen >= 4) {
11490
		if (INTEL_INFO(dev)->gen >= 4) {
10773
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
11491
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
10774
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
11492
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
10775
		}
11493
		}
10776
 
11494
 
10777
		err_printf(m, "Cursor [%d]:\n", i);
11495
		err_printf(m, "Cursor [%d]:\n", i);
10778
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
11496
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
10779
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
11497
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
10780
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
11498
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
10781
	}
11499
	}
10782
 
11500
 
10783
	for (i = 0; i < error->num_transcoders; i++) {
11501
	for (i = 0; i < error->num_transcoders; i++) {
10784
		err_printf(m, "  CPU transcoder: %c\n",
11502
		err_printf(m, "CPU transcoder: %c\n",
10785
			   transcoder_name(error->transcoder[i].cpu_transcoder));
11503
			   transcoder_name(error->transcoder[i].cpu_transcoder));
-
 
11504
		err_printf(m, "  Power: %s\n",
-
 
11505
			   error->transcoder[i].power_domain_on ? "on" : "off");
10786
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
11506
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
10787
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
11507
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
10788
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
11508
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
10789
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
11509
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
10790
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
11510
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
10791
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
11511
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
10792
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
11512
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
10793
	}
11513
	}
10794
}
11514
}
10795
#endif
11515
#endif