Subversion Repositories Kolibri OS

Rev 5060 → Rev 5097 (only the line marked with -/+ below differs between the two revisions)
/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *  Eric Anholt 
 */

-//#include 
+#include 
#include 
//#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include "intel_drv.h"
#include 
#include "i915_drv.h"
#include "i915_trace.h"
#include 
#include 
#include 
#include 
#include 

static inline void ndelay(unsigned long x)
{
    udelay(DIV_ROUND_UP(x, 1000));
}
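/*
 * Note: ndelay() is a local shim here - nanosecond delays are emulated by
 * rounding the argument up to whole microseconds and calling udelay().
 */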

/* Primary plane formats supported by all gen */
#define COMMON_PRIMARY_FORMATS \
	DRM_FORMAT_C8, \
	DRM_FORMAT_RGB565, \
	DRM_FORMAT_XRGB8888, \
	DRM_FORMAT_ARGB8888

/* Primary plane formats for gen <= 3 */
static const uint32_t intel_primary_formats_gen2[] = {
	COMMON_PRIMARY_FORMATS,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_ARGB1555,
};

/* Primary plane formats for gen >= 4 */
static const uint32_t intel_primary_formats_gen4[] = {
	COMMON_PRIMARY_FORMATS, \
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ABGR2101010,
};

/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

#define DIV_ROUND_CLOSEST_ULL(ll, d)	\
({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
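/*
 * DIV_ROUND_CLOSEST_ULL rounds to the nearest integer rather than
 * truncating: adding d/2 before the division biases the quotient up by
 * half a divisor, and do_div() leaves the quotient in _tmp, which the
 * statement expression then yields.  Worked example:
 * DIV_ROUND_CLOSEST_ULL(1000000000ULL, 270000) = (1000000000 + 135000) /
 * 270000 = 3704, whereas plain truncating division would give 3703.
 */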

#define MAX_ERRNO       4095
phys_addr_t get_bus_addr(void);

static inline void outb(u8 v, u16 port)
{
    asm volatile("outb %0,%1" : : "a" (v), "dN" (port));
}
static inline u8 inb(u16 port)
{
    u8 v;
    asm volatile("inb %1,%0" : "=a" (v) : "dN" (port));
    return v;
}
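/*
 * outb()/inb() are x86 port I/O helpers: the "a" constraint routes the
 * data byte through AL, and "dN" lets the port be encoded either as an
 * 8-bit immediate or passed in DX.
 */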

static void intel_increase_pllclock(struct drm_device *dev,
				    enum pipe pipe);
void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_config *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				    struct intel_crtc_config *pipe_config);

static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
			  int x, int y, struct drm_framebuffer *old_fb);
static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *ifb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj);
static void intel_dp_set_m_n(struct intel_crtc *crtc);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void intel_set_pipe_csc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc);

static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
{
	if (!connector->mst_port)
		return connector->encoder;
	else
		return &connector->mst_port->mst_encoders[pipe]->base;
}

typedef struct {
    int min, max;
} intel_range_t;

typedef struct {
    int dot_limit;
    int p2_slow, p2_fast;
} intel_p2_t;

typedef struct intel_limit intel_limit_t;
struct intel_limit {
    intel_range_t   dot, vco, n, m, m1, m2, p, p1;
    intel_p2_t      p2;
};
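/*
 * Each intel_limits_* table below bounds the legal DPLL divider ranges
 * (n, m1, m2, p1, p2 and the derived m, p, vco and dot clock) for one
 * platform/output combination.  The p2 entry switches between a slow and
 * a fast post divider around p2.dot_limit.  Candidate settings are
 * checked against these tables by intel_PLL_is_valid() further down.
 */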

int
intel_pch_rawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!HAS_PCH_SPLIT(dev));

	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
	if (IS_GEN5(dev)) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
	} else
		return 27;
}

static const intel_limit_t intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};


static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const intel_limit_t intel_limits_vlv = {
	 /*
	  * These are the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const intel_limit_t intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4860000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
};
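/*
 * Note that the VLV/CHV tables are expressed in fast-clock terms (hence
 * the "* 5" dot limits), and the CHV m2 bounds are pre-shifted by 22 bits
 * because m2 is handled as a .22 fixed-point value on that platform (see
 * chv_clock() and chv_find_best_dpll() below).
 */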

static void vlv_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}

/**
 * Returns whether any output on the specified pipe is of the specified type
 */
static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->type == type)
			return true;

	return false;
}

static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
						int refclk)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev)) {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else
		limit = &intel_limits_ironlake_dac;

	return limit;
}

static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev))
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else /* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;

	return limit;
}

static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (HAS_PCH_SPLIT(dev))
		limit = intel_ironlake_limit(crtc, refclk);
	else if (IS_G4X(dev)) {
		limit = intel_g4x_limit(crtc);
	} else if (IS_PINEVIEW(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_pineview_lvds;
		else
			limit = &intel_limits_pineview_sdvo;
	} else if (IS_CHERRYVIEW(dev)) {
		limit = &intel_limits_chv;
	} else if (IS_VALLEYVIEW(dev)) {
		limit = &intel_limits_vlv;
	} else if (!IS_GEN2(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i9xx_lvds;
		else
			limit = &intel_limits_i9xx_sdvo;
	} else {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i8xx_lvds;
		else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
			limit = &intel_limits_i8xx_dvo;
		else
			limit = &intel_limits_i8xx_dac;
	}
	return limit;
}

/* m1 is reserved as 0 in Pineview, n is a ring counter */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}

static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static void i9xx_clock(int refclk, intel_clock_t *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}

static void chv_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
			clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
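/*
 * Summary of the dot-clock arithmetic implemented by the helpers above:
 * vco = refclk * M / N and dot = vco / P, with P = p1 * p2 everywhere and
 * the platform-specific encodings
 *   i9xx:     M = 5 * (m1 + 2) + (m2 + 2),  N = n + 2
 *   pineview: M = m2 + 2                    (m1 is reserved as 0)
 *   vlv/chv:  M = m1 * m2; on chv m2 is .22 fixed point, so the divisor
 *             is n << 22.
 */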

#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	if (!IS_VALLEYVIEW(dev)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

static bool
i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
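/*
 * i9xx_find_best_dpll() is a brute-force search: it walks the m1/m2/n/p1
 * ranges allowed by the limit table, derives the resulting dot clock,
 * discards invalid combinations, and keeps the candidate with the
 * smallest |dot - target|.  The initial error is the target itself, so
 * "err != target" on return means at least one valid candidate was found.
 */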

static bool
pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pineview_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

static bool
g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefere larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

static bool
vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm, diff;

					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_clock(refclk, &clock);

					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					diff = abs(clock.dot - target);
					ppm = div_u64(1000000ULL * diff, target);

					if (ppm < 100 && clock.p > best_clock->p) {
						bestppm = 0;
						*best_clock = clock;
						found = true;
					}

					if (bestppm >= 10 && ppm < bestppm - 10) {
						bestppm = ppm;
						*best_clock = clock;
						found = true;
					}
				}
			}
		}
	}

	return found;
}
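/*
 * The VLV variant searches in fast-clock space (target * 5) and, instead
 * of iterating m2, solves for it directly from n, p and m1.  Candidates
 * are ranked by their ppm deviation from the target: within 100 ppm a
 * larger post divider p is preferred, otherwise a candidate only wins if
 * it improves the best ppm seen so far by more than 10.
 */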

static bool
chv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	uint64_t m2;
	int found = false;

	memset(best_clock, 0, sizeof(*best_clock));

	/*
	 * Based on hardware doc, the n always set to 1, and m1 always
	 * set to 2.  If requires to support 200Mhz refclk, we need to
	 * revisit this because n may not 1 anymore.
	 */
	clock.n = 1, clock.m1 = 2;
	target *= 5;	/* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
				clock.p2 >= limit->p2.p2_slow;
				clock.p2 -= clock.p2 > 10 ? 2 : 1) {

			clock.p = clock.p1 * clock.p2;

			m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
					clock.n) << 22, refclk * clock.m1);

			if (m2 > INT_MAX/clock.m1)
				continue;

			clock.m2 = m2;

			chv_clock(refclk, &clock);

			if (!intel_PLL_is_valid(dev, limit, &clock))
				continue;

			/* based on hardware requirement, prefer bigger p
			 */
			if (clock.p > best_clock->p) {
				*best_clock = clock;
				found = true;
			}
		}
	}

	return found;
}
886
 
886
 
887
bool intel_crtc_active(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 */
	return intel_crtc->active && crtc->primary->fb &&
		intel_crtc->config.adjusted_mode.crtc_clock;
}

enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum pipe pipe)
{
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return intel_crtc->config.cpu_transcoder;
}

static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);

	frame = I915_READ(frame_reg);

	if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
		WARN(1, "vblank wait timed out\n");
}

/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
 * mode setting code.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipestat_reg = PIPESTAT(pipe);

	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		g4x_wait_for_vblank(dev, pipe);
		return;
	}

	/* Clear existing vblank status. Note this will clear any other
	 * sticky status fields as well.
	 *
	 * This races with i915_driver_irq_handler() with the result
	 * that either function could miss a vblank event.  Here it is not
	 * fatal, as we will either wait upon the next vblank interrupt or
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
	 * called during modeset at which time the GPU should be idle and
	 * should *not* be performing page flips and thus not waiting on
	 * vblanks...
	 * Currently, the result of us stealing a vblank from the irq
	 * handler is that a single frame will be skipped during swapbuffers.
	 */
	I915_WRITE(pipestat_reg,
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

	/* Wait for vblank interrupt bit to set */
	if (wait_for(I915_READ(pipestat_reg) &
		     PIPE_VBLANK_INTERRUPT_STATUS,
		     50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}

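/*
 * pipe_dsl_stopped() below detects a stopped pipe by sampling the PIPEDSL
 * scanline counter twice, 5 ms apart; if the masked value has not moved,
 * the pipe is considered stopped.
 */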
static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN2(dev))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	mdelay(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 == line2;
}

/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		/* Wait for the display line to settle */
		if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
			WARN(1, "pipe_off wait timed out\n");
	}
}

/*
 * ibx_digital_port_connected - is the specified port connected?
 * @dev_priv: i915 private structure
 * @port: the port to test
 *
 * Returns true if @port is connected, false otherwise.
 */
bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
				struct intel_digital_port *port)
{
	u32 bit;

	if (HAS_PCH_IBX(dev_priv->dev)) {
		switch (port->port) {
		case PORT_B:
			bit = SDE_PORTB_HOTPLUG;
			break;
		case PORT_C:
			bit = SDE_PORTC_HOTPLUG;
			break;
		case PORT_D:
			bit = SDE_PORTD_HOTPLUG;
			break;
		default:
			return true;
		}
	} else {
		switch (port->port) {
		case PORT_B:
			bit = SDE_PORTB_HOTPLUG_CPT;
			break;
		case PORT_C:
			bit = SDE_PORTC_HOTPLUG_CPT;
			break;
		case PORT_D:
			bit = SDE_PORTD_HOTPLUG_CPT;
			break;
		default:
			return true;
		}
	}

	return I915_READ(SDEISR) & bit;
}

static const char *state_string(bool enabled)
{
	return enabled ? "on" : "off";
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	mutex_lock(&dev_priv->dpio_lock);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	mutex_unlock(&dev_priv->dpio_lock);

	cur_state = val & DSI_PLL_VCO_EN;
	WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)

struct intel_shared_dpll *
intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

	if (crtc->config.shared_dpll < 0)
		return NULL;

	return &dev_priv->shared_dplls[crtc->config.shared_dpll];
}

/* For ILK+ */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	if (WARN(!pll,
		 "asserting DPLL %s with no DPLL\n", state_string(state)))
		return;

	cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
	WARN(cur_state != state,
	     "%s assertion failure (expected %s, current %s)\n",
	     pll->name, state_string(state), state_string(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv->dev)) {
		/* DDI does not have a specific FDI_TX register */
		reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
		val = I915_READ(reg);
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		reg = FDI_TX_CTL(pipe);
		val = I915_READ(reg);
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_RX_ENABLE);
	WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (INTEL_INFO(dev_priv->dev)->gen == 5)
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv->dev))
		return;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	WARN(cur_state != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}

static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	int pp_reg, lvds_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		pp_reg = PCH_PP_CONTROL;
		lvds_reg = PCH_LVDS;
	} else {
		pp_reg = PP_CONTROL;
		lvds_reg = LVDS;
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
		locked = false;

	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
		panel_pipe = PIPE_B;

	WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}

static void assert_cursor(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	struct drm_device *dev = dev_priv->dev;
	bool cur_state;

	if (IS_845G(dev) || IS_I865G(dev))
		cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
	else
		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

	WARN(cur_state != state,
	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)

void assert_pipe(struct drm_i915_private *dev_priv,
			enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	/* if we need the pipe A quirk it must be always on */
	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
		state = true;

	if (!intel_display_power_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
		cur_state = false;
	} else {
		reg = PIPECONF(cpu_transcoder);
		val = I915_READ(reg);
		cur_state = !!(val & PIPECONF_ENABLE);
	}

	WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}

static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
	WARN(cur_state != state,
	     "plane %c assertion failure (expected %s, current %s)\n",
	     plane_name(plane), state_string(state), state_string(cur_state));
}

#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)

static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int reg, i;
	u32 val;
	int cur_pipe;

	/* Primary planes are fixed to pipes on gen4+ */
	if (INTEL_INFO(dev)->gen >= 4) {
		reg = DSPCNTR(pipe);
		val = I915_READ(reg);
		WARN(val & DISPLAY_PLANE_ENABLE,
		     "plane %c assertion failure, should be disabled but not\n",
		     plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	for_each_pipe(i) {
		reg = DSPCNTR(i);
		val = I915_READ(reg);
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(i), pipe_name(pipe));
	}
}

static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int reg, sprite;
	u32 val;

	if (IS_VALLEYVIEW(dev)) {
		for_each_sprite(pipe, sprite) {
			reg = SPCNTR(pipe, sprite);
			val = I915_READ(reg);
			WARN(val & SP_ENABLE,
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
			     sprite_name(pipe, sprite), pipe_name(pipe));
		}
	} else if (INTEL_INFO(dev)->gen >= 7) {
		reg = SPRCTL(pipe);
		val = I915_READ(reg);
		WARN(val & SPRITE_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	} else if (INTEL_INFO(dev)->gen >= 5) {
		reg = DVSCNTR(pipe);
		val = I915_READ(reg);
		WARN(val & DVS_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	}
}

static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));

	val = I915_READ(PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}

static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
					   enum pipe pipe)
{
	int reg;
	u32 val;
	bool enabled;

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	enabled = !!(val & TRANS_ENABLE);
	WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}

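/*
 * The *_pipe_enabled() helpers below answer "is this port currently driving
 * the given pipe/transcoder?".  The pipe/transcoder select encoding differs
 * per platform, which is why each helper branches on CPT (select read from
 * TRANS_DP_CTL or PORT_TRANS_SEL), Cherryview (its own *_CHV masks) and the
 * older pipe-select layouts.
 */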
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum pipe pipe, u32 port_sel, u32 val)
{
	if ((val & DP_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32	trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
		u32	trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
			return false;
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
			return false;
	} else {
		if ((val & DP_PIPE_MASK) != (pipe << 30))
			return false;
	}
	return true;
}

static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & SDVO_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
			return false;
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
			return false;
	} else {
		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
			return false;
	}
	return true;
}

static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & LVDS_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
			return false;
	}
	return true;
}

static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & ADPA_DAC_ENABLE) == 0)
		return false;
	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
			return false;
	}
	return true;
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);
	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));

	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
	     && (val & DP_PIPEB_SELECT),
	     "IBX PCH dp port still using transcoder B\n");
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, int reg)
{
	u32 val = I915_READ(reg);
	WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));

	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
	     && (val & SDVO_PIPE_B_SELECT),
	     "IBX PCH hdmi port still using transcoder B\n");
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	reg = PCH_ADPA;
	val = I915_READ(reg);
	WARN(adpa_pipe_enabled(dev_priv, pipe, val),
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	reg = PCH_LVDS;
	val = I915_READ(reg);
	WARN(lvds_pipe_enabled(dev_priv, pipe, val),
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}

static void intel_init_dpio(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_VALLEYVIEW(dev))
		return;

	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}

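/*
 * intel_reset_dpio() below brings the Cherryview display PHYs to a known
 * state: it polls DISPLAY_PHY_STATUS for the power-good indication and then
 * deasserts the common lane reset via DISPLAY_PHY_CONTROL, which (per the
 * comment inside the loop) must only be done on init or resume with both
 * PLLs disabled.
 */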
static void intel_reset_dpio(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_CHERRYVIEW(dev)) {
		enum dpio_phy phy;
		u32 val;

		for (phy = DPIO_PHY0; phy < I915_NUM_PHYS_VLV; phy++) {
			/* Poll for phypwrgood signal */
			if (wait_for(I915_READ(DISPLAY_PHY_STATUS) &
						PHY_POWERGOOD(phy), 1))
				DRM_ERROR("Display PHY %d is not power up\n", phy);

			/*
			 * Deassert common lane reset for PHY.
			 *
			 * This should only be done on init and resume from S3
			 * with both PLLs disabled, or we risk losing DPIO and
			 * PLL synchronization.
			 */
			val = I915_READ(DISPLAY_PHY_CONTROL);
			I915_WRITE(DISPLAY_PHY_CONTROL,
				PHY_COM_LANE_RESET_DEASSERT(phy, val));
		}
	}
}

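/*
 * vlv_enable_pll() below follows the classic i9xx-style sequence: write the
 * DPLL value, wait ~150 us, poll for DPLL_LOCK_VLV, program DPLL_MD, and
 * then rewrite the DPLL register three more times ("for luck", as inherited
 * from the i9xx path further down).
 */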
static void vlv_enable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config.dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* No really, not for ILK+ */
	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150);

	if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);

	I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(crtc->pipe));

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}

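/*
 * chv_enable_pll() below differs from the VLV path in that it first enables
 * the 10-bit DCLKP clock to the display controller through DPIO
 * (CHV_CMN_DW14), waits the documented >100 ns, and only then writes DPLL
 * and polls for lock.
 */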
static void chv_enable_pll(struct intel_crtc *crtc)
1601
static void chv_enable_pll(struct intel_crtc *crtc)
1602
{
1602
{
1603
	struct drm_device *dev = crtc->base.dev;
1603
	struct drm_device *dev = crtc->base.dev;
1604
	struct drm_i915_private *dev_priv = dev->dev_private;
1604
	struct drm_i915_private *dev_priv = dev->dev_private;
1605
	int pipe = crtc->pipe;
1605
	int pipe = crtc->pipe;
1606
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
1606
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
1607
	u32 tmp;
1607
	u32 tmp;
1608
 
1608
 
1609
	assert_pipe_disabled(dev_priv, crtc->pipe);
1609
	assert_pipe_disabled(dev_priv, crtc->pipe);
1610
 
1610
 
1611
	BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));
1611
	BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));
1612
 
1612
 
1613
	mutex_lock(&dev_priv->dpio_lock);
1613
	mutex_lock(&dev_priv->dpio_lock);
1614
 
1614
 
1615
	/* Enable back the 10bit clock to display controller */
1615
	/* Enable back the 10bit clock to display controller */
1616
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1616
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1617
	tmp |= DPIO_DCLKP_EN;
1617
	tmp |= DPIO_DCLKP_EN;
1618
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1618
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1619
 
1619
 
1620
	/*
1620
	/*
1621
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1621
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1622
	 */
1622
	 */
1623
	udelay(1);
1623
	udelay(1);
1624
 
1624
 
1625
	/* Enable PLL */
1625
	/* Enable PLL */
1626
	I915_WRITE(DPLL(pipe), crtc->config.dpll_hw_state.dpll);
1626
	I915_WRITE(DPLL(pipe), crtc->config.dpll_hw_state.dpll);
1627
 
1627
 
1628
	/* Check PLL is locked */
1628
	/* Check PLL is locked */
1629
	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1629
	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1630
		DRM_ERROR("PLL %d failed to lock\n", pipe);
1630
		DRM_ERROR("PLL %d failed to lock\n", pipe);
1631
 
1631
 
1632
	/* not sure when this should be written */
1632
	/* not sure when this should be written */
1633
	I915_WRITE(DPLL_MD(pipe), crtc->config.dpll_hw_state.dpll_md);
1633
	I915_WRITE(DPLL_MD(pipe), crtc->config.dpll_hw_state.dpll_md);
1634
	POSTING_READ(DPLL_MD(pipe));
1634
	POSTING_READ(DPLL_MD(pipe));
1635
 
1635
 
1636
	mutex_unlock(&dev_priv->dpio_lock);
1636
	mutex_unlock(&dev_priv->dpio_lock);
1637
}
1637
}
1638
 
1638
 
1639
static void i9xx_enable_pll(struct intel_crtc *crtc)
1639
static void i9xx_enable_pll(struct intel_crtc *crtc)
1640
{
1640
{
1641
	struct drm_device *dev = crtc->base.dev;
1641
	struct drm_device *dev = crtc->base.dev;
1642
	struct drm_i915_private *dev_priv = dev->dev_private;
1642
	struct drm_i915_private *dev_priv = dev->dev_private;
1643
	int reg = DPLL(crtc->pipe);
1643
	int reg = DPLL(crtc->pipe);
1644
	u32 dpll = crtc->config.dpll_hw_state.dpll;
1644
	u32 dpll = crtc->config.dpll_hw_state.dpll;
1645
 
1645
 
1646
	assert_pipe_disabled(dev_priv, crtc->pipe);
1646
	assert_pipe_disabled(dev_priv, crtc->pipe);
1647
 
1647
 
1648
	/* No really, not for ILK+ */
1648
	/* No really, not for ILK+ */
1649
	BUG_ON(INTEL_INFO(dev)->gen >= 5);
1649
	BUG_ON(INTEL_INFO(dev)->gen >= 5);
1650
 
1650
 
1651
	/* PLL is protected by panel, make sure we can write it */
1651
	/* PLL is protected by panel, make sure we can write it */
1652
	if (IS_MOBILE(dev) && !IS_I830(dev))
1652
	if (IS_MOBILE(dev) && !IS_I830(dev))
1653
		assert_panel_unlocked(dev_priv, crtc->pipe);
1653
		assert_panel_unlocked(dev_priv, crtc->pipe);
1654
 
1654
 
1655
	I915_WRITE(reg, dpll);
1655
	I915_WRITE(reg, dpll);
1656
 
1656
 
1657
	/* Wait for the clocks to stabilize. */
1657
	/* Wait for the clocks to stabilize. */
1658
	POSTING_READ(reg);
1658
	POSTING_READ(reg);
1659
	udelay(150);
1659
	udelay(150);
1660
 
1660
 
1661
	if (INTEL_INFO(dev)->gen >= 4) {
1661
	if (INTEL_INFO(dev)->gen >= 4) {
1662
		I915_WRITE(DPLL_MD(crtc->pipe),
1662
		I915_WRITE(DPLL_MD(crtc->pipe),
1663
			   crtc->config.dpll_hw_state.dpll_md);
1663
			   crtc->config.dpll_hw_state.dpll_md);
1664
	} else {
1664
	} else {
1665
		/* The pixel multiplier can only be updated once the
1665
		/* The pixel multiplier can only be updated once the
1666
		 * DPLL is enabled and the clocks are stable.
1666
		 * DPLL is enabled and the clocks are stable.
1667
		 *
1667
		 *
1668
		 * So write it again.
1668
		 * So write it again.
1669
		 */
1669
		 */
1670
		I915_WRITE(reg, dpll);
1670
		I915_WRITE(reg, dpll);
1671
	}
1671
	}
1672
 
1672
 
1673
    /* We do this three times for luck */
1673
    /* We do this three times for luck */
1674
	I915_WRITE(reg, dpll);
1674
	I915_WRITE(reg, dpll);
1675
    POSTING_READ(reg);
1675
    POSTING_READ(reg);
1676
    udelay(150); /* wait for warmup */
1676
    udelay(150); /* wait for warmup */
1677
	I915_WRITE(reg, dpll);
1677
	I915_WRITE(reg, dpll);
1678
    POSTING_READ(reg);
1678
    POSTING_READ(reg);
1679
    udelay(150); /* wait for warmup */
1679
    udelay(150); /* wait for warmup */
1680
	I915_WRITE(reg, dpll);
1680
	I915_WRITE(reg, dpll);
1681
    POSTING_READ(reg);
1681
    POSTING_READ(reg);
1682
    udelay(150); /* wait for warmup */
1682
    udelay(150); /* wait for warmup */
1683
}
1683
}
1684
 
1684
 
1685
/**
1685
/**
1686
 * i9xx_disable_pll - disable a PLL
1686
 * i9xx_disable_pll - disable a PLL
1687
 * @dev_priv: i915 private structure
1687
 * @dev_priv: i915 private structure
1688
 * @pipe: pipe PLL to disable
1688
 * @pipe: pipe PLL to disable
1689
 *
1689
 *
1690
 * Disable the PLL for @pipe, making sure the pipe is off first.
1690
 * Disable the PLL for @pipe, making sure the pipe is off first.
1691
 *
1691
 *
1692
 * Note!  This is for pre-ILK only.
1692
 * Note!  This is for pre-ILK only.
1693
 */
1693
 */
1694
static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1694
static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1695
{
1695
{
1696
	/* Don't disable pipe A or pipe A PLLs if needed */
1696
	/* Don't disable pipe A or pipe A PLLs if needed */
1697
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1697
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1698
		return;
1698
		return;
1699
 
1699
 
1700
	/* Make sure the pipe isn't still relying on us */
1700
	/* Make sure the pipe isn't still relying on us */
1701
	assert_pipe_disabled(dev_priv, pipe);
1701
	assert_pipe_disabled(dev_priv, pipe);
1702
 
1702
 
1703
	I915_WRITE(DPLL(pipe), 0);
1703
	I915_WRITE(DPLL(pipe), 0);
1704
	POSTING_READ(DPLL(pipe));
1704
	POSTING_READ(DPLL(pipe));
1705
}
1705
}
1706
 
1706
 
1707
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1707
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1708
{
1708
{
1709
	u32 val = 0;
1709
	u32 val = 0;
1710
 
1710
 
1711
	/* Make sure the pipe isn't still relying on us */
1711
	/* Make sure the pipe isn't still relying on us */
1712
	assert_pipe_disabled(dev_priv, pipe);
1712
	assert_pipe_disabled(dev_priv, pipe);
1713
 
1713
 
1714
	/*
1714
	/*
1715
	 * Leave integrated clock source and reference clock enabled for pipe B.
1715
	 * Leave integrated clock source and reference clock enabled for pipe B.
1716
	 * The latter is needed for VGA hotplug / manual detection.
1716
	 * The latter is needed for VGA hotplug / manual detection.
1717
	 */
1717
	 */
1718
	if (pipe == PIPE_B)
1718
	if (pipe == PIPE_B)
1719
		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
1719
		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
1720
	I915_WRITE(DPLL(pipe), val);
1720
	I915_WRITE(DPLL(pipe), val);
1721
	POSTING_READ(DPLL(pipe));
1721
	POSTING_READ(DPLL(pipe));
1722
 
1722
 
1723
}
1723
}
1724
 
1724
 
1725
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/* Set PLL en = 0 */
	val = DPLL_SSC_REF_CLOCK_CHV;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	mutex_lock(&dev_priv->dpio_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	/* disable left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	mutex_unlock(&dev_priv->dpio_lock);
}

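/*
 * Port readiness on VLV/CHV is reported in different registers: PORT_B and
 * PORT_C have ready bits in DPLL(0), while PORT_D is reported through
 * DPIO_PHY_STATUS (see the switch below). The wait only warns on timeout;
 * it does not fail the modeset.
 */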
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
		struct intel_digital_port *dport)
{
	u32 port_mask;
	int dpll_reg;

	switch (dport->port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (wait_for((I915_READ(dpll_reg) & port_mask) == 0, 1000))
		WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
		     port_name(dport->port), I915_READ(dpll_reg));
}

static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	if (WARN_ON(pll == NULL))
		return;

	WARN_ON(!pll->refcount);
	if (pll->active == 0) {
		DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
		WARN_ON(pll->on);
		assert_shared_dpll_disabled(dev_priv, pll);

		pll->mode_set(dev_priv, pll);
	}
}

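/*
 * Shared DPLL bookkeeping used by the helpers below: pll->refcount counts the
 * CRTCs that have claimed the PLL in their configuration, pll->active counts
 * the CRTCs currently running on it, and pll->on mirrors the hardware state.
 * For example, with two CRTCs sharing one PLL, the first enable call programs
 * and powers the PLL; the second only bumps pll->active. Disable mirrors that:
 * the hardware is switched off only when pll->active drops back to zero.
 */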
/**
 * intel_enable_shared_dpll - enable PCH PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
 * drives the transcoder clock.
 */
static void intel_enable_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	if (WARN_ON(pll == NULL))
		return;

	if (WARN_ON(pll->refcount == 0))
		return;

	DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
		      pll->name, pll->active, pll->on,
		      crtc->base.base.id);

	if (pll->active++) {
		WARN_ON(!pll->on);
		assert_shared_dpll_enabled(dev_priv, pll);
		return;
	}
	WARN_ON(pll->on);

	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);

	DRM_DEBUG_KMS("enabling %s\n", pll->name);
	pll->enable(dev_priv, pll);
	pll->on = true;
}

void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	/* PCH only available on ILK+ */
	BUG_ON(INTEL_INFO(dev)->gen < 5);
	if (WARN_ON(pll == NULL))
		return;

	if (WARN_ON(pll->refcount == 0))
		return;

	DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
		      pll->name, pll->active, pll->on,
		      crtc->base.base.id);

	if (WARN_ON(pll->active == 0)) {
		assert_shared_dpll_disabled(dev_priv, pll);
		return;
	}

	assert_shared_dpll_enabled(dev_priv, pll);
	WARN_ON(!pll->on);
	if (--pll->active)
		return;

	DRM_DEBUG_KMS("disabling %s\n", pll->name);
	pll->disable(dev_priv, pll);
	pll->on = false;

	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}

static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t reg, val, pipeconf_val;

	/* PCH only available on ILK+ */
	BUG_ON(INTEL_INFO(dev)->gen < 5);

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv,
				   intel_crtc_to_shared_dpll(intel_crtc));

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		/* Workaround: Set the timing override bit before enabling the
		 * pch transcoder. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv->dev)) {
		/*
		 * make the BPC in transcoder be consistent with
		 * that in pipeconf reg.
		 */
		val &= ~PIPECONF_BPC_MASK;
		val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
		if (HAS_PCH_IBX(dev_priv->dev) &&
		    intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}

static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* PCH only available on ILK+ */
	BUG_ON(INTEL_INFO(dev_priv->dev)->gen < 5);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(_TRANSA_CHICKEN2);
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(_TRANSA_CHICKEN2, val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}

static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t reg, val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (!HAS_PCH_IBX(dev)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}

static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(_TRANSA_CHICKEN2);
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(_TRANSA_CHICKEN2, val);
}

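/*
 * Note on the transcoder mapping used below: on LPT the PCH transcoder is
 * always transcoder A, while on earlier PCH platforms it matches the pipe.
 * The CPU transcoder is looked up via intel_pipe_to_cpu_transcoder().
 */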
/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @crtc: crtc responsible for the pipe
 *
 * Enable @crtc's pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 */
static void intel_enable_pipe(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum pipe pch_transcoder;
	int reg;
	u32 val;

	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	if (HAS_PCH_LPT(dev_priv->dev))
		pch_transcoder = TRANSCODER_A;
	else
		pch_transcoder = pipe;

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (!HAS_PCH_SPLIT(dev_priv->dev))
		if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	else {
		if (crtc->config.has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		WARN_ON(!(pipe == PIPE_A &&
			  dev_priv->quirks & QUIRK_PIPEA_FORCE));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);
}

/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @dev_priv: i915 private structure
 * @pipe: pipe to disable
 *
 * Disable @pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
 *
 * @pipe should be %PIPE_A or %PIPE_B.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	int reg;
	u32 val;

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
		return;

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
	intel_wait_for_pipe_off(dev_priv->dev, pipe);
}

/*
 * Plane regs are double buffered, going from enabled->disabled needs a
 * trigger in order to latch.  The display address reg provides this.
 */
void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
				      enum plane plane)
{
	struct drm_device *dev = dev_priv->dev;
	u32 reg = INTEL_INFO(dev)->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);

	I915_WRITE(reg, I915_READ(reg));
	POSTING_READ(reg);
}

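/*
 * Typical usage of the flush above: update DSPCNTR (or another double-buffered
 * plane register) and then call intel_flush_primary_plane() so the write to
 * the display address register latches the new state, as done by the
 * enable/disable helpers below.
 */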
/**
 * intel_enable_primary_hw_plane - enable the primary plane on a given pipe
 * @dev_priv: i915 private structure
 * @plane: plane to enable
 * @pipe: pipe being fed
 *
 * Enable @plane on @pipe, making sure that @pipe is running first.
 */
static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
			       enum plane plane, enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	int reg;
	u32 val;

	/* If the pipe isn't enabled, we can't pump pixels and may hang */
	assert_pipe_enabled(dev_priv, pipe);

	if (intel_crtc->primary_enabled)
		return;

	intel_crtc->primary_enabled = true;

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	WARN_ON(val & DISPLAY_PLANE_ENABLE);

	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
	intel_flush_primary_plane(dev_priv, plane);
}

/**
 * intel_disable_primary_hw_plane - disable the primary hardware plane
 * @dev_priv: i915 private structure
 * @plane: plane to disable
 * @pipe: pipe consuming the data
 *
 * Disable @plane; should be an independent operation.
 */
static void intel_disable_primary_hw_plane(struct drm_i915_private *dev_priv,
				enum plane plane, enum pipe pipe)
{
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	int reg;
	u32 val;

	if (!intel_crtc->primary_enabled)
		return;

	intel_crtc->primary_enabled = false;

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	WARN_ON((val & DISPLAY_PLANE_ENABLE) == 0);

	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
	intel_flush_primary_plane(dev_priv, plane);
}

static bool need_vtd_wa(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
	if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

static int intel_align_height(struct drm_device *dev, int height, bool tiled)
{
	int tile_height;

	tile_height = tiled ? (IS_GEN2(dev) ? 16 : 8) : 1;
	return ALIGN(height, tile_height);
}

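/*
 * Scanout alignment picked by the function below, as encoded in its switch:
 * linear buffers need 128 KiB on Broadwater/Crestline, 4 KiB on gen4+ and
 * 64 KiB otherwise; X-tiled buffers rely on the fence constraints applied by
 * pin(); Y-tiled scanout is rejected. When the VT-d workaround applies, the
 * alignment is raised to at least 256 KiB.
 */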
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct intel_engine_cs *pipelined)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 alignment;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	switch (obj->tiling_mode) {
	case I915_TILING_NONE:
		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
			alignment = 128 * 1024;
		else if (INTEL_INFO(dev)->gen >= 4)
			alignment = 4 * 1024;
		else
			alignment = 64 * 1024;
		break;
	case I915_TILING_X:
		/* pin() will align the object as required by fence */
		alignment = 0;
		break;
	case I915_TILING_Y:
		WARN(1, "Y tiled bo slipped through, driver bug!\n");
		return -EINVAL;
	default:
		BUG();
	}

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
	if (ret)
		goto err_interruptible;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.  For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto err_unpin;

	i915_gem_object_pin_fence(obj);

	dev_priv->mm.interruptible = true;
	intel_runtime_pm_put(dev_priv);
	return 0;

err_unpin:
	i915_gem_object_unpin_from_display_plane(obj);
err_interruptible:
	dev_priv->mm.interruptible = true;
	intel_runtime_pm_put(dev_priv);
	return ret;
}

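/*
 * In this port the call to i915_gem_object_unpin_from_display_plane() is
 * commented out below, so intel_unpin_fb_obj() only releases the fence and
 * the framebuffer object itself is left pinned.
 */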
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
{
	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));

	i915_gem_object_unpin_fence(obj);
//	i915_gem_object_unpin_from_display_plane(obj);
}

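/*
 * Worked example for the X-tiled path of the helper below (a tile is
 * 512 bytes wide and 8 rows tall, i.e. 4096 bytes): with cpp = 4 and
 * pitch = 4096, a tile covers 128 pixels per row. For x = 200, y = 21 the
 * function yields tile_rows = 2 (y becomes 5) and tiles = 1 (x becomes 72),
 * and returns 2 * 4096 * 8 + 1 * 4096 = 69632 as the base-tile offset.
 */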
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
 * is assumed to be a power-of-two. */
unsigned long intel_gen4_compute_page_offset(int *x, int *y,
					     unsigned int tiling_mode,
					     unsigned int cpp,
					     unsigned int pitch)
{
	if (tiling_mode != I915_TILING_NONE) {
		unsigned int tile_rows, tiles;

		tile_rows = *y / 8;
		*y %= 8;

		tiles = *x / (512/cpp);
		*x %= 512/cpp;

		return tile_rows * pitch * 8 + tiles * 4096;
	} else {
		unsigned int offset;

		offset = *y * pitch + *x * cpp;
		*y = 0;
		*x = (offset & 4095) / cpp;
		return offset & -4096;
	}
}

int intel_format_to_fourcc(int format)
{
	switch (format) {
	case DISPPLANE_8BPP:
		return DRM_FORMAT_C8;
	case DISPPLANE_BGRX555:
		return DRM_FORMAT_XRGB1555;
	case DISPPLANE_BGRX565:
		return DRM_FORMAT_RGB565;
	default:
	case DISPPLANE_BGRX888:
		return DRM_FORMAT_XRGB8888;
	case DISPPLANE_RGBX888:
		return DRM_FORMAT_XBGR8888;
	case DISPPLANE_BGRX101010:
		return DRM_FORMAT_XRGB2101010;
	case DISPPLANE_RGBX101010:
		return DRM_FORMAT_XBGR2101010;
	}
}

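/*
 * The helper below wraps a framebuffer that was preallocated in stolen memory
 * (plane_config->base/size) in a GEM object; presumably this lets the display
 * configuration left behind by firmware be inherited until a new framebuffer
 * is provided.
 */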
static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
				  struct intel_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_gem_object *obj = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	u32 base = plane_config->base;

	if (plane_config->size == 0)
		return false;

	obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
							     plane_config->size);
	if (!obj)
		return false;

	main_fb_obj = obj;

	if (plane_config->tiled) {
		obj->tiling_mode = I915_TILING_X;
		obj->stride = crtc->base.primary->fb->pitches[0];
	}

	mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format;
	mode_cmd.width = crtc->base.primary->fb->width;
	mode_cmd.height = crtc->base.primary->fb->height;
	mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0];

	mutex_lock(&dev->struct_mutex);

	if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb),
				   &mode_cmd, obj)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
	}

	obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_KMS("plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
	return false;
}

static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
				 struct intel_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_crtc *c;
	struct intel_crtc *i;
	struct drm_i915_gem_object *obj;

	if (!intel_crtc->base.primary->fb)
		return;

	if (intel_alloc_plane_obj(intel_crtc, plane_config))
		return;

	kfree(intel_crtc->base.primary->fb);
	intel_crtc->base.primary->fb = NULL;

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		i = to_intel_crtc(c);

		if (c == &intel_crtc->base)
			continue;

		if (!i->active)
			continue;

		obj = intel_fb_obj(c->primary->fb);
		if (obj == NULL)
			continue;

		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
			drm_framebuffer_reference(c->primary->fb);
			intel_crtc->base.primary->fb = c->primary->fb;
			obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
			break;
		}
	}
}

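/*
 * The two update_primary_plane() implementations below program DSPCNTR with
 * the pixel format, then write the stride and offsets: gen4+ and ILK+ use
 * DSPSURF plus DSPTILEOFF/DSPLINOFF (or DSPOFFSET on HSW/BDW), while older
 * generations fold everything into a single DSPADDR write.
 */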
static void i9xx_update_primary_plane(struct drm_crtc *crtc,
				     struct drm_framebuffer *fb,
				     int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	u32 reg;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		BUG();
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		if (obj->tiling_mode != I915_TILING_NONE)
			dspcntr |= DISPPLANE_TILED;
		else
			dspcntr &= ~DISPPLANE_TILED;
	}

	if (IS_G4X(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	I915_WRITE(reg, dspcntr);

	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);

	if (INTEL_INFO(dev)->gen >= 4) {
		intel_crtc->dspaddr_offset =
			intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
						       fb->bits_per_pixel / 8,
						       fb->pitches[0]);
		linear_offset -= intel_crtc->dspaddr_offset;
	} else {
		intel_crtc->dspaddr_offset = linear_offset;
	}

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
		      fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DSPSURF(plane),
			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	} else
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
	POSTING_READ(reg);
}

static void ironlake_update_primary_plane(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	u32 reg;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		BUG();
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
	else
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	I915_WRITE(reg, dspcntr);

	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
	intel_crtc->dspaddr_offset =
		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
					       fb->bits_per_pixel / 8,
					       fb->pitches[0]);
	linear_offset -= intel_crtc->dspaddr_offset;

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
		      fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane),
		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
	} else {
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	}
	POSTING_READ(reg);
}

/* Assume fb object is pinned & idle & fenced and just update base pointers */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			   int x, int y, enum mode_set_atomic state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.disable_fbc)
		dev_priv->display.disable_fbc(dev);
	intel_increase_pllclock(dev, to_intel_crtc(crtc)->pipe);

	dev_priv->display.update_primary_plane(crtc, fb, x, y);

	return 0;
}

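/*
 * The code below, starting with intel_display_handle_reset(), is wrapped in
 * "#if 0" and is therefore not compiled in this port.
 */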
#if 0
void intel_display_handle_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	/*
	 * Flips in the rings have been nuked by the reset,
	 * so complete all pending flips so that user space
	 * will get its events and not get stuck.
	 *
	 * Also update the base address of all primary
	 * planes to the last fb to make sure we're
	 * showing the correct fb after a reset.
	 *
	 * Need to make two loops over the crtcs so that we
	 * don't try to grab a crtc mutex before the
	 * pending_flip_queue really got woken up.
	 */

	for_each_crtc(dev, crtc) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		enum plane plane = intel_crtc->plane;

		intel_prepare_page_flip(dev, plane);
		intel_finish_page_flip_plane(dev, plane);
	}

	for_each_crtc(dev, crtc) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		drm_modeset_lock(&crtc->mutex, NULL);
		/*
		 * FIXME: Once we have proper support for primary planes (and
		 * disabling them without disabling the entire crtc) allow again
		 * a NULL crtc->primary->fb.
		 */
		if (intel_crtc->active && crtc->primary->fb)
			dev_priv->display.update_primary_plane(crtc,
							       crtc->primary->fb,
							       crtc->x,
							       crtc->y);
		drm_modeset_unlock(&crtc->mutex);
	}
}

static int
2658
static int
2648
intel_finish_fb(struct drm_framebuffer *old_fb)
2659
intel_finish_fb(struct drm_framebuffer *old_fb)
2649
{
2660
{
2650
	struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
2661
	struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
2651
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2662
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2652
	bool was_interruptible = dev_priv->mm.interruptible;
2663
	bool was_interruptible = dev_priv->mm.interruptible;
2653
	int ret;
2664
	int ret;
2654
 
2665
 
2655
	/* Big Hammer, we also need to ensure that any pending
2666
	/* Big Hammer, we also need to ensure that any pending
2656
	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2667
	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2657
	 * current scanout is retired before unpinning the old
2668
	 * current scanout is retired before unpinning the old
2658
	 * framebuffer.
2669
	 * framebuffer.
2659
	 *
2670
	 *
2660
	 * This should only fail upon a hung GPU, in which case we
2671
	 * This should only fail upon a hung GPU, in which case we
2661
	 * can safely continue.
2672
	 * can safely continue.
2662
	 */
2673
	 */
2663
	dev_priv->mm.interruptible = false;
2674
	dev_priv->mm.interruptible = false;
2664
	ret = i915_gem_object_finish_gpu(obj);
2675
	ret = i915_gem_object_finish_gpu(obj);
2665
	dev_priv->mm.interruptible = was_interruptible;
2676
	dev_priv->mm.interruptible = was_interruptible;
2666
 
2677
 
2667
	return ret;
2678
	return ret;
2668
}
2679
}
2669
 
2680
 
2670
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2681
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2671
{
2682
{
2672
	struct drm_device *dev = crtc->dev;
2683
	struct drm_device *dev = crtc->dev;
2673
	struct drm_i915_private *dev_priv = dev->dev_private;
2684
	struct drm_i915_private *dev_priv = dev->dev_private;
2674
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2685
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2675
	unsigned long flags;
2686
	unsigned long flags;
2676
	bool pending;
2687
	bool pending;
2677
 
2688
 
2678
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
2689
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
2679
	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
2690
	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
2680
		return false;
2691
		return false;
2681
 
2692
 
2682
	spin_lock_irqsave(&dev->event_lock, flags);
2693
	spin_lock_irqsave(&dev->event_lock, flags);
2683
	pending = to_intel_crtc(crtc)->unpin_work != NULL;
2694
	pending = to_intel_crtc(crtc)->unpin_work != NULL;
2684
	spin_unlock_irqrestore(&dev->event_lock, flags);
2695
	spin_unlock_irqrestore(&dev->event_lock, flags);
2685
 
2696
 
2686
	return pending;
2697
	return pending;
2687
}
2698
}
2688
#endif
2699
#endif
2689
 
2700
 
2690
static int
2701
static int
2691
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2702
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2692
		    struct drm_framebuffer *fb)
2703
		    struct drm_framebuffer *fb)
2693
{
2704
{
2694
	struct drm_device *dev = crtc->dev;
2705
	struct drm_device *dev = crtc->dev;
2695
	struct drm_i915_private *dev_priv = dev->dev_private;
2706
	struct drm_i915_private *dev_priv = dev->dev_private;
2696
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2707
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2697
	enum pipe pipe = intel_crtc->pipe;
2708
	enum pipe pipe = intel_crtc->pipe;
2698
	struct drm_framebuffer *old_fb = crtc->primary->fb;
2709
	struct drm_framebuffer *old_fb = crtc->primary->fb;
2699
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2710
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2700
	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
2711
	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
2701
	int ret;
2712
	int ret;
2702
 
2713
 
2703
 
2714
 
2704
	/* no fb bound */
2715
	/* no fb bound */
2705
	if (!fb) {
2716
	if (!fb) {
2706
		DRM_ERROR("No FB bound\n");
2717
		DRM_ERROR("No FB bound\n");
2707
		return 0;
2718
		return 0;
2708
	}
2719
	}
2709
 
2720
 
2710
	if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
2721
	if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
2711
		DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
2722
		DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
2712
			  plane_name(intel_crtc->plane),
2723
			  plane_name(intel_crtc->plane),
2713
				INTEL_INFO(dev)->num_pipes);
2724
				INTEL_INFO(dev)->num_pipes);
2714
		return -EINVAL;
2725
		return -EINVAL;
2715
	}
2726
	}
2716
 
2727
 
2717
	mutex_lock(&dev->struct_mutex);
2728
	mutex_lock(&dev->struct_mutex);
2718
	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
2729
	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
2719
	if (ret == 0)
2730
	if (ret == 0)
2720
		i915_gem_track_fb(old_obj, obj,
2731
		i915_gem_track_fb(old_obj, obj,
2721
				  INTEL_FRONTBUFFER_PRIMARY(pipe));
2732
				  INTEL_FRONTBUFFER_PRIMARY(pipe));
2722
	mutex_unlock(&dev->struct_mutex);
2733
	mutex_unlock(&dev->struct_mutex);
2723
    if (ret != 0) {
2734
    if (ret != 0) {
2724
       DRM_ERROR("pin & fence failed\n");
2735
       DRM_ERROR("pin & fence failed\n");
2725
       return ret;
2736
       return ret;
2726
    }
2737
    }
2727
 
2738
 
2728
	/*
2739
	/*
2729
	 * Update pipe size and adjust fitter if needed: the reason for this is
2740
	 * Update pipe size and adjust fitter if needed: the reason for this is
2730
	 * that in compute_mode_changes we check the native mode (not the pfit
2741
	 * that in compute_mode_changes we check the native mode (not the pfit
2731
	 * mode) to see if we can flip rather than do a full mode set. In the
2742
	 * mode) to see if we can flip rather than do a full mode set. In the
2732
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
2743
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
2733
	 * pfit state, we'll end up with a big fb scanned out into the wrong
2744
	 * pfit state, we'll end up with a big fb scanned out into the wrong
2734
	 * sized surface.
2745
	 * sized surface.
2735
	 *
2746
	 *
2736
	 * To fix this properly, we need to hoist the checks up into
2747
	 * To fix this properly, we need to hoist the checks up into
2737
	 * compute_mode_changes (or above), check the actual pfit state and
2748
	 * compute_mode_changes (or above), check the actual pfit state and
2738
	 * whether the platform allows pfit disable with pipe active, and only
2749
	 * whether the platform allows pfit disable with pipe active, and only
2739
	 * then update the pipesrc and pfit state, even on the flip path.
2750
	 * then update the pipesrc and pfit state, even on the flip path.
2740
	 */
2751
	 */
2741
	if (i915.fastboot) {
2752
	if (i915.fastboot) {
2742
		const struct drm_display_mode *adjusted_mode =
2753
		const struct drm_display_mode *adjusted_mode =
2743
			&intel_crtc->config.adjusted_mode;
2754
			&intel_crtc->config.adjusted_mode;
2744
 
2755
 
2745
		I915_WRITE(PIPESRC(intel_crtc->pipe),
2756
		I915_WRITE(PIPESRC(intel_crtc->pipe),
2746
			   ((adjusted_mode->crtc_hdisplay - 1) << 16) |
2757
			   ((adjusted_mode->crtc_hdisplay - 1) << 16) |
2747
			   (adjusted_mode->crtc_vdisplay - 1));
2758
			   (adjusted_mode->crtc_vdisplay - 1));
2748
		if (!intel_crtc->config.pch_pfit.enabled &&
2759
		if (!intel_crtc->config.pch_pfit.enabled &&
2749
		    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
2760
		    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
2750
		     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2761
		     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2751
			I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
2762
			I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
2752
			I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
2763
			I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
2753
			I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
2764
			I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
2754
		}
2765
		}
2755
		intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
2766
		intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
2756
		intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
2767
		intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
2757
	}
2768
	}
2758
 
2769
 
2759
	dev_priv->display.update_primary_plane(crtc, fb, x, y);
2770
	dev_priv->display.update_primary_plane(crtc, fb, x, y);
2760
 
2771
 
2761
	if (intel_crtc->active)
2772
	if (intel_crtc->active)
2762
		intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
2773
		intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
2763
 
2774
 
2764
	crtc->primary->fb = fb;
2775
	crtc->primary->fb = fb;
2765
	crtc->x = x;
2776
	crtc->x = x;
2766
	crtc->y = y;
2777
	crtc->y = y;
2767
 
2778
 
2768
	if (old_fb) {
2779
	if (old_fb) {
2769
		if (intel_crtc->active && old_fb != fb)
2780
		if (intel_crtc->active && old_fb != fb)
2770
			intel_wait_for_vblank(dev, intel_crtc->pipe);
2781
			intel_wait_for_vblank(dev, intel_crtc->pipe);
2771
		mutex_lock(&dev->struct_mutex);
2782
		mutex_lock(&dev->struct_mutex);
2772
		intel_unpin_fb_obj(old_obj);
2783
		intel_unpin_fb_obj(old_obj);
2773
		mutex_unlock(&dev->struct_mutex);
2784
		mutex_unlock(&dev->struct_mutex);
2774
	}
2785
	}
2775
 
2786
 
2776
	mutex_lock(&dev->struct_mutex);
2787
	mutex_lock(&dev->struct_mutex);
2777
	intel_update_fbc(dev);
2788
	intel_update_fbc(dev);
2778
	mutex_unlock(&dev->struct_mutex);
2789
	mutex_unlock(&dev->struct_mutex);
2779
 
2790
 
2780
    return 0;
2791
    return 0;
2781
}
2792
}
2782
 
2793
 
2783
static void intel_fdi_normal_train(struct drm_crtc *crtc)
2794
static void intel_fdi_normal_train(struct drm_crtc *crtc)
2784
{
2795
{
2785
	struct drm_device *dev = crtc->dev;
2796
	struct drm_device *dev = crtc->dev;
2786
	struct drm_i915_private *dev_priv = dev->dev_private;
2797
	struct drm_i915_private *dev_priv = dev->dev_private;
2787
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2798
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2788
	int pipe = intel_crtc->pipe;
2799
	int pipe = intel_crtc->pipe;
2789
	u32 reg, temp;
2800
	u32 reg, temp;
2790
 
2801
 
2791
	/* enable normal train */
2802
	/* enable normal train */
2792
	reg = FDI_TX_CTL(pipe);
2803
	reg = FDI_TX_CTL(pipe);
2793
	temp = I915_READ(reg);
2804
	temp = I915_READ(reg);
2794
	if (IS_IVYBRIDGE(dev)) {
2805
	if (IS_IVYBRIDGE(dev)) {
2795
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2806
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2796
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
2807
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
2797
	} else {
2808
	} else {
2798
		temp &= ~FDI_LINK_TRAIN_NONE;
2809
		temp &= ~FDI_LINK_TRAIN_NONE;
2799
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
2810
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
2800
	}
2811
	}
2801
	I915_WRITE(reg, temp);
2812
	I915_WRITE(reg, temp);
2802
 
2813
 
2803
	reg = FDI_RX_CTL(pipe);
2814
	reg = FDI_RX_CTL(pipe);
2804
	temp = I915_READ(reg);
2815
	temp = I915_READ(reg);
2805
	if (HAS_PCH_CPT(dev)) {
2816
	if (HAS_PCH_CPT(dev)) {
2806
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2817
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2807
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2818
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2808
	} else {
2819
	} else {
2809
		temp &= ~FDI_LINK_TRAIN_NONE;
2820
		temp &= ~FDI_LINK_TRAIN_NONE;
2810
		temp |= FDI_LINK_TRAIN_NONE;
2821
		temp |= FDI_LINK_TRAIN_NONE;
2811
	}
2822
	}
2812
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2823
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2813
 
2824
 
2814
	/* wait one idle pattern time */
2825
	/* wait one idle pattern time */
2815
	POSTING_READ(reg);
2826
	POSTING_READ(reg);
2816
	udelay(1000);
2827
	udelay(1000);
2817
 
2828
 
2818
	/* IVB wants error correction enabled */
2829
	/* IVB wants error correction enabled */
2819
	if (IS_IVYBRIDGE(dev))
2830
	if (IS_IVYBRIDGE(dev))
2820
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2831
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2821
			   FDI_FE_ERRC_ENABLE);
2832
			   FDI_FE_ERRC_ENABLE);
2822
}
2833
}
2823
 
2834
 
2824
static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
2835
static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
2825
{
2836
{
2826
	return crtc->base.enabled && crtc->active &&
2837
	return crtc->base.enabled && crtc->active &&
2827
		crtc->config.has_pch_encoder;
2838
		crtc->config.has_pch_encoder;
2828
}
2839
}
2829
 
2840
 
2830
static void ivb_modeset_global_resources(struct drm_device *dev)
2841
static void ivb_modeset_global_resources(struct drm_device *dev)
2831
{
2842
{
2832
	struct drm_i915_private *dev_priv = dev->dev_private;
2843
	struct drm_i915_private *dev_priv = dev->dev_private;
2833
	struct intel_crtc *pipe_B_crtc =
2844
	struct intel_crtc *pipe_B_crtc =
2834
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
2845
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
2835
	struct intel_crtc *pipe_C_crtc =
2846
	struct intel_crtc *pipe_C_crtc =
2836
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
2847
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
2837
	uint32_t temp;
2848
	uint32_t temp;
2838
 
2849
 
2839
	/*
2850
	/*
2840
	 * When everything is off, disable fdi C so that we can enable fdi B
2851
	 * When everything is off, disable fdi C so that we can enable fdi B
2841
	 * with all lanes. Note that we don't care about enabled pipes without
2852
	 * with all lanes. Note that we don't care about enabled pipes without
2842
	 * an enabled pch encoder.
2853
	 * an enabled pch encoder.
2843
	 */
2854
	 */
2844
	if (!pipe_has_enabled_pch(pipe_B_crtc) &&
2855
	if (!pipe_has_enabled_pch(pipe_B_crtc) &&
2845
	    !pipe_has_enabled_pch(pipe_C_crtc)) {
2856
	    !pipe_has_enabled_pch(pipe_C_crtc)) {
2846
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
2857
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
2847
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
2858
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
2848
 
2859
 
2849
		temp = I915_READ(SOUTH_CHICKEN1);
2860
		temp = I915_READ(SOUTH_CHICKEN1);
2850
		temp &= ~FDI_BC_BIFURCATION_SELECT;
2861
		temp &= ~FDI_BC_BIFURCATION_SELECT;
2851
		DRM_DEBUG_KMS("disabling fdi C rx\n");
2862
		DRM_DEBUG_KMS("disabling fdi C rx\n");
2852
		I915_WRITE(SOUTH_CHICKEN1, temp);
2863
		I915_WRITE(SOUTH_CHICKEN1, temp);
2853
	}
2864
	}
2854
}
2865
}
2855
 
2866
 
2856
/* The FDI link training functions for ILK/Ibexpeak. */
2867
/* The FDI link training functions for ILK/Ibexpeak. */
2857
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2868
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2858
{
2869
{
2859
    struct drm_device *dev = crtc->dev;
2870
    struct drm_device *dev = crtc->dev;
2860
    struct drm_i915_private *dev_priv = dev->dev_private;
2871
    struct drm_i915_private *dev_priv = dev->dev_private;
2861
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2872
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2862
    int pipe = intel_crtc->pipe;
2873
    int pipe = intel_crtc->pipe;
2863
    u32 reg, temp, tries;
2874
    u32 reg, temp, tries;
2864
 
2875
 
2865
	/* FDI needs bits from pipe first */
2876
	/* FDI needs bits from pipe first */
2866
    assert_pipe_enabled(dev_priv, pipe);
2877
    assert_pipe_enabled(dev_priv, pipe);
2867
 
2878
 
2868
    /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
2879
    /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
2869
       for train result */
2880
       for train result */
2870
    reg = FDI_RX_IMR(pipe);
2881
    reg = FDI_RX_IMR(pipe);
2871
    temp = I915_READ(reg);
2882
    temp = I915_READ(reg);
2872
    temp &= ~FDI_RX_SYMBOL_LOCK;
2883
    temp &= ~FDI_RX_SYMBOL_LOCK;
2873
    temp &= ~FDI_RX_BIT_LOCK;
2884
    temp &= ~FDI_RX_BIT_LOCK;
2874
    I915_WRITE(reg, temp);
2885
    I915_WRITE(reg, temp);
2875
    I915_READ(reg);
2886
    I915_READ(reg);
2876
    udelay(150);
2887
    udelay(150);
2877
 
2888
 
2878
    /* enable CPU FDI TX and PCH FDI RX */
2889
    /* enable CPU FDI TX and PCH FDI RX */
2879
    reg = FDI_TX_CTL(pipe);
2890
    reg = FDI_TX_CTL(pipe);
2880
    temp = I915_READ(reg);
2891
    temp = I915_READ(reg);
2881
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
2892
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
2882
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2893
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2883
    temp &= ~FDI_LINK_TRAIN_NONE;
2894
    temp &= ~FDI_LINK_TRAIN_NONE;
2884
    temp |= FDI_LINK_TRAIN_PATTERN_1;
2895
    temp |= FDI_LINK_TRAIN_PATTERN_1;
2885
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
2896
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
2886
 
2897
 
2887
    reg = FDI_RX_CTL(pipe);
2898
    reg = FDI_RX_CTL(pipe);
2888
    temp = I915_READ(reg);
2899
    temp = I915_READ(reg);
2889
    temp &= ~FDI_LINK_TRAIN_NONE;
2900
    temp &= ~FDI_LINK_TRAIN_NONE;
2890
    temp |= FDI_LINK_TRAIN_PATTERN_1;
2901
    temp |= FDI_LINK_TRAIN_PATTERN_1;
2891
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
2902
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
2892
 
2903
 
2893
    POSTING_READ(reg);
2904
    POSTING_READ(reg);
2894
    udelay(150);
2905
    udelay(150);
2895
 
2906
 
2896
    /* Ironlake workaround, enable clock pointer after FDI enable */
2907
    /* Ironlake workaround, enable clock pointer after FDI enable */
2897
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2908
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2898
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2909
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2899
               FDI_RX_PHASE_SYNC_POINTER_EN);
2910
               FDI_RX_PHASE_SYNC_POINTER_EN);
2900
 
2911
 
2901
    reg = FDI_RX_IIR(pipe);
2912
    reg = FDI_RX_IIR(pipe);
2902
    for (tries = 0; tries < 5; tries++) {
2913
    for (tries = 0; tries < 5; tries++) {
2903
        temp = I915_READ(reg);
2914
        temp = I915_READ(reg);
2904
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2915
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2905
 
2916
 
2906
        if ((temp & FDI_RX_BIT_LOCK)) {
2917
        if ((temp & FDI_RX_BIT_LOCK)) {
2907
            DRM_DEBUG_KMS("FDI train 1 done.\n");
2918
            DRM_DEBUG_KMS("FDI train 1 done.\n");
2908
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2919
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2909
            break;
2920
            break;
2910
        }
2921
        }
2911
    }
2922
    }
2912
    if (tries == 5)
2923
    if (tries == 5)
2913
        DRM_ERROR("FDI train 1 fail!\n");
2924
        DRM_ERROR("FDI train 1 fail!\n");
2914
 
2925
 
2915
    /* Train 2 */
2926
    /* Train 2 */
2916
    reg = FDI_TX_CTL(pipe);
2927
    reg = FDI_TX_CTL(pipe);
2917
    temp = I915_READ(reg);
2928
    temp = I915_READ(reg);
2918
    temp &= ~FDI_LINK_TRAIN_NONE;
2929
    temp &= ~FDI_LINK_TRAIN_NONE;
2919
    temp |= FDI_LINK_TRAIN_PATTERN_2;
2930
    temp |= FDI_LINK_TRAIN_PATTERN_2;
2920
    I915_WRITE(reg, temp);
2931
    I915_WRITE(reg, temp);
2921
 
2932
 
2922
    reg = FDI_RX_CTL(pipe);
2933
    reg = FDI_RX_CTL(pipe);
2923
    temp = I915_READ(reg);
2934
    temp = I915_READ(reg);
2924
    temp &= ~FDI_LINK_TRAIN_NONE;
2935
    temp &= ~FDI_LINK_TRAIN_NONE;
2925
    temp |= FDI_LINK_TRAIN_PATTERN_2;
2936
    temp |= FDI_LINK_TRAIN_PATTERN_2;
2926
    I915_WRITE(reg, temp);
2937
    I915_WRITE(reg, temp);
2927
 
2938
 
2928
    POSTING_READ(reg);
2939
    POSTING_READ(reg);
2929
    udelay(150);
2940
    udelay(150);
2930
 
2941
 
2931
    reg = FDI_RX_IIR(pipe);
2942
    reg = FDI_RX_IIR(pipe);
2932
    for (tries = 0; tries < 5; tries++) {
2943
    for (tries = 0; tries < 5; tries++) {
2933
        temp = I915_READ(reg);
2944
        temp = I915_READ(reg);
2934
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2945
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2935
 
2946
 
2936
        if (temp & FDI_RX_SYMBOL_LOCK) {
2947
        if (temp & FDI_RX_SYMBOL_LOCK) {
2937
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2948
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2938
            DRM_DEBUG_KMS("FDI train 2 done.\n");
2949
            DRM_DEBUG_KMS("FDI train 2 done.\n");
2939
            break;
2950
            break;
2940
        }
2951
        }
2941
    }
2952
    }
2942
    if (tries == 5)
2953
    if (tries == 5)
2943
        DRM_ERROR("FDI train 2 fail!\n");
2954
        DRM_ERROR("FDI train 2 fail!\n");
2944
 
2955
 
2945
    DRM_DEBUG_KMS("FDI train done\n");
2956
    DRM_DEBUG_KMS("FDI train done\n");
2946
 
2957
 
2947
}
2958
}
2948
 
2959
 
2949
static const int snb_b_fdi_train_param[] = {
2960
static const int snb_b_fdi_train_param[] = {
2950
    FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2961
    FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2951
    FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2962
    FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2952
    FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2963
    FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2953
    FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2964
    FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2954
};
2965
};
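/*
 * Editorial note: each entry above selects a voltage-swing/pre-emphasis
 * combination; the SNB and IVB training loops below try them in order,
 * polling FDI_RX_IIR for bit lock and symbol lock after each attempt.
 */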
2955
 
2966
 
2956
/* The FDI link training functions for SNB/Cougarpoint. */
2967
/* The FDI link training functions for SNB/Cougarpoint. */
2957
static void gen6_fdi_link_train(struct drm_crtc *crtc)
2968
static void gen6_fdi_link_train(struct drm_crtc *crtc)
2958
{
2969
{
2959
    struct drm_device *dev = crtc->dev;
2970
    struct drm_device *dev = crtc->dev;
2960
    struct drm_i915_private *dev_priv = dev->dev_private;
2971
    struct drm_i915_private *dev_priv = dev->dev_private;
2961
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2972
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2962
    int pipe = intel_crtc->pipe;
2973
    int pipe = intel_crtc->pipe;
2963
	u32 reg, temp, i, retry;
2974
	u32 reg, temp, i, retry;
2964
 
2975
 
2965
    /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
2976
    /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
2966
       for train result */
2977
       for train result */
2967
    reg = FDI_RX_IMR(pipe);
2978
    reg = FDI_RX_IMR(pipe);
2968
    temp = I915_READ(reg);
2979
    temp = I915_READ(reg);
2969
    temp &= ~FDI_RX_SYMBOL_LOCK;
2980
    temp &= ~FDI_RX_SYMBOL_LOCK;
2970
    temp &= ~FDI_RX_BIT_LOCK;
2981
    temp &= ~FDI_RX_BIT_LOCK;
2971
    I915_WRITE(reg, temp);
2982
    I915_WRITE(reg, temp);
2972
 
2983
 
2973
    POSTING_READ(reg);
2984
    POSTING_READ(reg);
2974
    udelay(150);
2985
    udelay(150);
2975
 
2986
 
2976
    /* enable CPU FDI TX and PCH FDI RX */
2987
    /* enable CPU FDI TX and PCH FDI RX */
2977
    reg = FDI_TX_CTL(pipe);
2988
    reg = FDI_TX_CTL(pipe);
2978
    temp = I915_READ(reg);
2989
    temp = I915_READ(reg);
2979
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
2990
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
2980
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2991
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2981
    temp &= ~FDI_LINK_TRAIN_NONE;
2992
    temp &= ~FDI_LINK_TRAIN_NONE;
2982
    temp |= FDI_LINK_TRAIN_PATTERN_1;
2993
    temp |= FDI_LINK_TRAIN_PATTERN_1;
2983
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2994
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2984
    /* SNB-B */
2995
    /* SNB-B */
2985
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2996
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2986
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
2997
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
2987
 
2998
 
2988
	I915_WRITE(FDI_RX_MISC(pipe),
2999
	I915_WRITE(FDI_RX_MISC(pipe),
2989
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3000
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
2990
 
3001
 
2991
    reg = FDI_RX_CTL(pipe);
3002
    reg = FDI_RX_CTL(pipe);
2992
    temp = I915_READ(reg);
3003
    temp = I915_READ(reg);
2993
    if (HAS_PCH_CPT(dev)) {
3004
    if (HAS_PCH_CPT(dev)) {
2994
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3005
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2995
        temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3006
        temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2996
    } else {
3007
    } else {
2997
        temp &= ~FDI_LINK_TRAIN_NONE;
3008
        temp &= ~FDI_LINK_TRAIN_NONE;
2998
        temp |= FDI_LINK_TRAIN_PATTERN_1;
3009
        temp |= FDI_LINK_TRAIN_PATTERN_1;
2999
    }
3010
    }
3000
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
3011
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
3001
 
3012
 
3002
    POSTING_READ(reg);
3013
    POSTING_READ(reg);
3003
    udelay(150);
3014
    udelay(150);
3004
 
3015
 
3005
	for (i = 0; i < 4; i++) {
3016
	for (i = 0; i < 4; i++) {
3006
        reg = FDI_TX_CTL(pipe);
3017
        reg = FDI_TX_CTL(pipe);
3007
        temp = I915_READ(reg);
3018
        temp = I915_READ(reg);
3008
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3019
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3009
        temp |= snb_b_fdi_train_param[i];
3020
        temp |= snb_b_fdi_train_param[i];
3010
        I915_WRITE(reg, temp);
3021
        I915_WRITE(reg, temp);
3011
 
3022
 
3012
        POSTING_READ(reg);
3023
        POSTING_READ(reg);
3013
        udelay(500);
3024
        udelay(500);
3014
 
3025
 
3015
		for (retry = 0; retry < 5; retry++) {
3026
		for (retry = 0; retry < 5; retry++) {
3016
        reg = FDI_RX_IIR(pipe);
3027
        reg = FDI_RX_IIR(pipe);
3017
        temp = I915_READ(reg);
3028
        temp = I915_READ(reg);
3018
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3029
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3019
        if (temp & FDI_RX_BIT_LOCK) {
3030
        if (temp & FDI_RX_BIT_LOCK) {
3020
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3031
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3021
            DRM_DEBUG_KMS("FDI train 1 done.\n");
3032
            DRM_DEBUG_KMS("FDI train 1 done.\n");
3022
            break;
3033
            break;
3023
        }
3034
        }
3024
			udelay(50);
3035
			udelay(50);
3025
		}
3036
		}
3026
		if (retry < 5)
3037
		if (retry < 5)
3027
			break;
3038
			break;
3028
    }
3039
    }
3029
    if (i == 4)
3040
    if (i == 4)
3030
        DRM_ERROR("FDI train 1 fail!\n");
3041
        DRM_ERROR("FDI train 1 fail!\n");
3031
 
3042
 
3032
    /* Train 2 */
3043
    /* Train 2 */
3033
    reg = FDI_TX_CTL(pipe);
3044
    reg = FDI_TX_CTL(pipe);
3034
    temp = I915_READ(reg);
3045
    temp = I915_READ(reg);
3035
    temp &= ~FDI_LINK_TRAIN_NONE;
3046
    temp &= ~FDI_LINK_TRAIN_NONE;
3036
    temp |= FDI_LINK_TRAIN_PATTERN_2;
3047
    temp |= FDI_LINK_TRAIN_PATTERN_2;
3037
    if (IS_GEN6(dev)) {
3048
    if (IS_GEN6(dev)) {
3038
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3049
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3039
        /* SNB-B */
3050
        /* SNB-B */
3040
        temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3051
        temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3041
    }
3052
    }
3042
    I915_WRITE(reg, temp);
3053
    I915_WRITE(reg, temp);
3043
 
3054
 
3044
    reg = FDI_RX_CTL(pipe);
3055
    reg = FDI_RX_CTL(pipe);
3045
    temp = I915_READ(reg);
3056
    temp = I915_READ(reg);
3046
    if (HAS_PCH_CPT(dev)) {
3057
    if (HAS_PCH_CPT(dev)) {
3047
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3058
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3048
        temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3059
        temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3049
    } else {
3060
    } else {
3050
        temp &= ~FDI_LINK_TRAIN_NONE;
3061
        temp &= ~FDI_LINK_TRAIN_NONE;
3051
        temp |= FDI_LINK_TRAIN_PATTERN_2;
3062
        temp |= FDI_LINK_TRAIN_PATTERN_2;
3052
    }
3063
    }
3053
    I915_WRITE(reg, temp);
3064
    I915_WRITE(reg, temp);
3054
 
3065
 
3055
    POSTING_READ(reg);
3066
    POSTING_READ(reg);
3056
    udelay(150);
3067
    udelay(150);
3057
 
3068
 
3058
	for (i = 0; i < 4; i++) {
3069
	for (i = 0; i < 4; i++) {
3059
        reg = FDI_TX_CTL(pipe);
3070
        reg = FDI_TX_CTL(pipe);
3060
        temp = I915_READ(reg);
3071
        temp = I915_READ(reg);
3061
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3072
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3062
        temp |= snb_b_fdi_train_param[i];
3073
        temp |= snb_b_fdi_train_param[i];
3063
        I915_WRITE(reg, temp);
3074
        I915_WRITE(reg, temp);
3064
 
3075
 
3065
        POSTING_READ(reg);
3076
        POSTING_READ(reg);
3066
        udelay(500);
3077
        udelay(500);
3067
 
3078
 
3068
		for (retry = 0; retry < 5; retry++) {
3079
		for (retry = 0; retry < 5; retry++) {
3069
        reg = FDI_RX_IIR(pipe);
3080
        reg = FDI_RX_IIR(pipe);
3070
        temp = I915_READ(reg);
3081
        temp = I915_READ(reg);
3071
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3082
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3072
        if (temp & FDI_RX_SYMBOL_LOCK) {
3083
        if (temp & FDI_RX_SYMBOL_LOCK) {
3073
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3084
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3074
            DRM_DEBUG_KMS("FDI train 2 done.\n");
3085
            DRM_DEBUG_KMS("FDI train 2 done.\n");
3075
            break;
3086
            break;
3076
        }
3087
        }
3077
			udelay(50);
3088
			udelay(50);
3078
		}
3089
		}
3079
		if (retry < 5)
3090
		if (retry < 5)
3080
			break;
3091
			break;
3081
    }
3092
    }
3082
    if (i == 4)
3093
    if (i == 4)
3083
        DRM_ERROR("FDI train 2 fail!\n");
3094
        DRM_ERROR("FDI train 2 fail!\n");
3084
 
3095
 
3085
    DRM_DEBUG_KMS("FDI train done.\n");
3096
    DRM_DEBUG_KMS("FDI train done.\n");
3086
}
3097
}
3087
 
3098
 
3088
/* Manual link training for Ivy Bridge A0 parts */
3099
/* Manual link training for Ivy Bridge A0 parts */
3089
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
3100
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
3090
{
3101
{
3091
    struct drm_device *dev = crtc->dev;
3102
    struct drm_device *dev = crtc->dev;
3092
    struct drm_i915_private *dev_priv = dev->dev_private;
3103
    struct drm_i915_private *dev_priv = dev->dev_private;
3093
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3104
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3094
    int pipe = intel_crtc->pipe;
3105
    int pipe = intel_crtc->pipe;
3095
	u32 reg, temp, i, j;
3106
	u32 reg, temp, i, j;
3096
 
3107
 
3097
    /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
3108
    /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
3098
       for train result */
3109
       for train result */
3099
    reg = FDI_RX_IMR(pipe);
3110
    reg = FDI_RX_IMR(pipe);
3100
    temp = I915_READ(reg);
3111
    temp = I915_READ(reg);
3101
    temp &= ~FDI_RX_SYMBOL_LOCK;
3112
    temp &= ~FDI_RX_SYMBOL_LOCK;
3102
    temp &= ~FDI_RX_BIT_LOCK;
3113
    temp &= ~FDI_RX_BIT_LOCK;
3103
    I915_WRITE(reg, temp);
3114
    I915_WRITE(reg, temp);
3104
 
3115
 
3105
    POSTING_READ(reg);
3116
    POSTING_READ(reg);
3106
    udelay(150);
3117
    udelay(150);
3107
 
3118
 
3108
	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
3119
	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
3109
		      I915_READ(FDI_RX_IIR(pipe)));
3120
		      I915_READ(FDI_RX_IIR(pipe)));
3110
 
3121
 
3111
	/* Try each vswing and preemphasis setting twice before moving on */
3122
	/* Try each vswing and preemphasis setting twice before moving on */
3112
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
3123
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
3113
		/* disable first in case we need to retry */
3124
		/* disable first in case we need to retry */
3114
		reg = FDI_TX_CTL(pipe);
3125
		reg = FDI_TX_CTL(pipe);
3115
		temp = I915_READ(reg);
3126
		temp = I915_READ(reg);
3116
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
3127
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
3117
		temp &= ~FDI_TX_ENABLE;
3128
		temp &= ~FDI_TX_ENABLE;
3118
		I915_WRITE(reg, temp);
3129
		I915_WRITE(reg, temp);
3119
 
3130
 
3120
		reg = FDI_RX_CTL(pipe);
3131
		reg = FDI_RX_CTL(pipe);
3121
		temp = I915_READ(reg);
3132
		temp = I915_READ(reg);
3122
		temp &= ~FDI_LINK_TRAIN_AUTO;
3133
		temp &= ~FDI_LINK_TRAIN_AUTO;
3123
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3134
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3124
		temp &= ~FDI_RX_ENABLE;
3135
		temp &= ~FDI_RX_ENABLE;
3125
		I915_WRITE(reg, temp);
3136
		I915_WRITE(reg, temp);
3126
 
3137
 
3127
    /* enable CPU FDI TX and PCH FDI RX */
3138
    /* enable CPU FDI TX and PCH FDI RX */
3128
    reg = FDI_TX_CTL(pipe);
3139
    reg = FDI_TX_CTL(pipe);
3129
    temp = I915_READ(reg);
3140
    temp = I915_READ(reg);
3130
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
3141
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
3131
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
3142
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
3132
    temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
3143
    temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
3133
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3144
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3134
		temp |= snb_b_fdi_train_param[j/2];
3145
		temp |= snb_b_fdi_train_param[j/2];
3135
	temp |= FDI_COMPOSITE_SYNC;
3146
	temp |= FDI_COMPOSITE_SYNC;
3136
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
3147
    I915_WRITE(reg, temp | FDI_TX_ENABLE);
3137
 
3148
 
3138
	I915_WRITE(FDI_RX_MISC(pipe),
3149
	I915_WRITE(FDI_RX_MISC(pipe),
3139
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3150
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3140
 
3151
 
3141
    reg = FDI_RX_CTL(pipe);
3152
    reg = FDI_RX_CTL(pipe);
3142
    temp = I915_READ(reg);
3153
    temp = I915_READ(reg);
3143
    temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3154
    temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3144
	temp |= FDI_COMPOSITE_SYNC;
3155
	temp |= FDI_COMPOSITE_SYNC;
3145
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
3156
    I915_WRITE(reg, temp | FDI_RX_ENABLE);
3146
 
3157
 
3147
    POSTING_READ(reg);
3158
    POSTING_READ(reg);
3148
		udelay(1); /* should be 0.5us */
3159
		udelay(1); /* should be 0.5us */
3149
 
3160
 
3150
	for (i = 0; i < 4; i++) {
3161
	for (i = 0; i < 4; i++) {
3151
        reg = FDI_RX_IIR(pipe);
3162
        reg = FDI_RX_IIR(pipe);
3152
        temp = I915_READ(reg);
3163
        temp = I915_READ(reg);
3153
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3164
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3154
 
3165
 
3155
        if (temp & FDI_RX_BIT_LOCK ||
3166
        if (temp & FDI_RX_BIT_LOCK ||
3156
            (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
3167
            (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
3157
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3168
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3158
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
3169
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
3159
					      i);
3170
					      i);
3160
            break;
3171
            break;
3161
        }
3172
        }
3162
			udelay(1); /* should be 0.5us */
3173
			udelay(1); /* should be 0.5us */
3163
		}
3174
		}
3164
		if (i == 4) {
3175
		if (i == 4) {
3165
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
3176
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
3166
			continue;
3177
			continue;
3167
    }
3178
    }
3168
 
3179
 
3169
    /* Train 2 */
3180
    /* Train 2 */
3170
    reg = FDI_TX_CTL(pipe);
3181
    reg = FDI_TX_CTL(pipe);
3171
    temp = I915_READ(reg);
3182
    temp = I915_READ(reg);
3172
    temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3183
    temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3173
    temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
3184
    temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
3174
    I915_WRITE(reg, temp);
3185
    I915_WRITE(reg, temp);
3175
 
3186
 
3176
    reg = FDI_RX_CTL(pipe);
3187
    reg = FDI_RX_CTL(pipe);
3177
    temp = I915_READ(reg);
3188
    temp = I915_READ(reg);
3178
    temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3189
    temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3179
    temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3190
    temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3180
    I915_WRITE(reg, temp);
3191
    I915_WRITE(reg, temp);
3181
 
3192
 
3182
    POSTING_READ(reg);
3193
    POSTING_READ(reg);
3183
		udelay(2); /* should be 1.5us */
3194
		udelay(2); /* should be 1.5us */
3184
 
3195
 
3185
	for (i = 0; i < 4; i++) {
3196
	for (i = 0; i < 4; i++) {
3186
        reg = FDI_RX_IIR(pipe);
3197
        reg = FDI_RX_IIR(pipe);
3187
        temp = I915_READ(reg);
3198
        temp = I915_READ(reg);
3188
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3199
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3189
 
3200
 
3190
			if (temp & FDI_RX_SYMBOL_LOCK ||
3201
			if (temp & FDI_RX_SYMBOL_LOCK ||
3191
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
3202
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
3192
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3203
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3193
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
3204
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
3194
					      i);
3205
					      i);
3195
				goto train_done;
3206
				goto train_done;
3196
        }
3207
        }
3197
			udelay(2); /* should be 1.5us */
3208
			udelay(2); /* should be 1.5us */
3198
    }
3209
    }
3199
    if (i == 4)
3210
    if (i == 4)
3200
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
3211
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
3201
	}
3212
	}
3202
 
3213
 
3203
train_done:
3214
train_done:
3204
    DRM_DEBUG_KMS("FDI train done.\n");
3215
    DRM_DEBUG_KMS("FDI train done.\n");
3205
}
3216
}
3206
 
3217
 
3207
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
3218
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
3208
{
3219
{
3209
	struct drm_device *dev = intel_crtc->base.dev;
3220
	struct drm_device *dev = intel_crtc->base.dev;
3210
	struct drm_i915_private *dev_priv = dev->dev_private;
3221
	struct drm_i915_private *dev_priv = dev->dev_private;
3211
	int pipe = intel_crtc->pipe;
3222
	int pipe = intel_crtc->pipe;
3212
	u32 reg, temp;
3223
	u32 reg, temp;
3213
 
3224
 
3214
 
3225
 
3215
	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
3226
	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
3216
	reg = FDI_RX_CTL(pipe);
3227
	reg = FDI_RX_CTL(pipe);
3217
	temp = I915_READ(reg);
3228
	temp = I915_READ(reg);
3218
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
3229
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
3219
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
3230
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
3220
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3231
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3221
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
3232
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
3222
 
3233
 
3223
	POSTING_READ(reg);
3234
	POSTING_READ(reg);
3224
	udelay(200);
3235
	udelay(200);
3225
 
3236
 
3226
	/* Switch from Rawclk to PCDclk */
3237
	/* Switch from Rawclk to PCDclk */
3227
	temp = I915_READ(reg);
3238
	temp = I915_READ(reg);
3228
	I915_WRITE(reg, temp | FDI_PCDCLK);
3239
	I915_WRITE(reg, temp | FDI_PCDCLK);
3229
 
3240
 
3230
	POSTING_READ(reg);
3241
	POSTING_READ(reg);
3231
	udelay(200);
3242
	udelay(200);
3232
 
3243
 
3233
	/* Enable CPU FDI TX PLL, always on for Ironlake */
3244
	/* Enable CPU FDI TX PLL, always on for Ironlake */
3234
	reg = FDI_TX_CTL(pipe);
3245
	reg = FDI_TX_CTL(pipe);
3235
	temp = I915_READ(reg);
3246
	temp = I915_READ(reg);
3236
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
3247
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
3237
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
3248
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
3238
 
3249
 
3239
		POSTING_READ(reg);
3250
		POSTING_READ(reg);
3240
		udelay(100);
3251
		udelay(100);
3241
	}
3252
	}
3242
}
3253
}
3243
 
3254
 
3244
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
3255
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
3245
{
3256
{
3246
	struct drm_device *dev = intel_crtc->base.dev;
3257
	struct drm_device *dev = intel_crtc->base.dev;
3247
	struct drm_i915_private *dev_priv = dev->dev_private;
3258
	struct drm_i915_private *dev_priv = dev->dev_private;
3248
	int pipe = intel_crtc->pipe;
3259
	int pipe = intel_crtc->pipe;
3249
	u32 reg, temp;
3260
	u32 reg, temp;
3250
 
3261
 
3251
	/* Switch from PCDclk to Rawclk */
3262
	/* Switch from PCDclk to Rawclk */
3252
	reg = FDI_RX_CTL(pipe);
3263
	reg = FDI_RX_CTL(pipe);
3253
	temp = I915_READ(reg);
3264
	temp = I915_READ(reg);
3254
	I915_WRITE(reg, temp & ~FDI_PCDCLK);
3265
	I915_WRITE(reg, temp & ~FDI_PCDCLK);
3255
 
3266
 
3256
	/* Disable CPU FDI TX PLL */
3267
	/* Disable CPU FDI TX PLL */
3257
	reg = FDI_TX_CTL(pipe);
3268
	reg = FDI_TX_CTL(pipe);
3258
	temp = I915_READ(reg);
3269
	temp = I915_READ(reg);
3259
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3270
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3260
 
3271
 
3261
	POSTING_READ(reg);
3272
	POSTING_READ(reg);
3262
	udelay(100);
3273
	udelay(100);
3263
 
3274
 
3264
	reg = FDI_RX_CTL(pipe);
3275
	reg = FDI_RX_CTL(pipe);
3265
	temp = I915_READ(reg);
3276
	temp = I915_READ(reg);
3266
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3277
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3267
 
3278
 
3268
	/* Wait for the clocks to turn off. */
3279
	/* Wait for the clocks to turn off. */
3269
	POSTING_READ(reg);
3280
	POSTING_READ(reg);
3270
	udelay(100);
3281
	udelay(100);
3271
}
3282
}
3272
 
3283
 
3273
static void ironlake_fdi_disable(struct drm_crtc *crtc)
3284
static void ironlake_fdi_disable(struct drm_crtc *crtc)
3274
{
3285
{
3275
	struct drm_device *dev = crtc->dev;
3286
	struct drm_device *dev = crtc->dev;
3276
	struct drm_i915_private *dev_priv = dev->dev_private;
3287
	struct drm_i915_private *dev_priv = dev->dev_private;
3277
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3288
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3278
	int pipe = intel_crtc->pipe;
3289
	int pipe = intel_crtc->pipe;
3279
	u32 reg, temp;
3290
	u32 reg, temp;
3280
 
3291
 
3281
	/* disable CPU FDI tx and PCH FDI rx */
3292
	/* disable CPU FDI tx and PCH FDI rx */
3282
	reg = FDI_TX_CTL(pipe);
3293
	reg = FDI_TX_CTL(pipe);
3283
	temp = I915_READ(reg);
3294
	temp = I915_READ(reg);
3284
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
3295
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
3285
	POSTING_READ(reg);
3296
	POSTING_READ(reg);
3286
 
3297
 
3287
	reg = FDI_RX_CTL(pipe);
3298
	reg = FDI_RX_CTL(pipe);
3288
	temp = I915_READ(reg);
3299
	temp = I915_READ(reg);
3289
	temp &= ~(0x7 << 16);
3300
	temp &= ~(0x7 << 16);
3290
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3301
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3291
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
3302
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
3292
 
3303
 
3293
	POSTING_READ(reg);
3304
	POSTING_READ(reg);
3294
	udelay(100);
3305
	udelay(100);
3295
 
3306
 
3296
	/* Ironlake workaround, disable clock pointer after downing FDI */
3307
	/* Ironlake workaround, disable clock pointer after downing FDI */
3297
	if (HAS_PCH_IBX(dev))
3308
	if (HAS_PCH_IBX(dev))
3298
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3309
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3299
 
3310
 
3300
	/* still set train pattern 1 */
3311
	/* still set train pattern 1 */
3301
	reg = FDI_TX_CTL(pipe);
3312
	reg = FDI_TX_CTL(pipe);
3302
	temp = I915_READ(reg);
3313
	temp = I915_READ(reg);
3303
	temp &= ~FDI_LINK_TRAIN_NONE;
3314
	temp &= ~FDI_LINK_TRAIN_NONE;
3304
	temp |= FDI_LINK_TRAIN_PATTERN_1;
3315
	temp |= FDI_LINK_TRAIN_PATTERN_1;
3305
	I915_WRITE(reg, temp);
3316
	I915_WRITE(reg, temp);
3306
 
3317
 
3307
	reg = FDI_RX_CTL(pipe);
3318
	reg = FDI_RX_CTL(pipe);
3308
	temp = I915_READ(reg);
3319
	temp = I915_READ(reg);
3309
	if (HAS_PCH_CPT(dev)) {
3320
	if (HAS_PCH_CPT(dev)) {
3310
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3321
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3311
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3322
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3312
	} else {
3323
	} else {
3313
		temp &= ~FDI_LINK_TRAIN_NONE;
3324
		temp &= ~FDI_LINK_TRAIN_NONE;
3314
		temp |= FDI_LINK_TRAIN_PATTERN_1;
3325
		temp |= FDI_LINK_TRAIN_PATTERN_1;
3315
	}
3326
	}
3316
	/* BPC in FDI rx is consistent with that in PIPECONF */
3327
	/* BPC in FDI rx is consistent with that in PIPECONF */
3317
	temp &= ~(0x07 << 16);
3328
	temp &= ~(0x07 << 16);
3318
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3329
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3319
	I915_WRITE(reg, temp);
3330
	I915_WRITE(reg, temp);
3320
 
3331
 
3321
	POSTING_READ(reg);
3332
	POSTING_READ(reg);
3322
	udelay(100);
3333
	udelay(100);
3323
}
3334
}
3324
 
3335
 
3325
bool intel_has_pending_fb_unpin(struct drm_device *dev)
3336
bool intel_has_pending_fb_unpin(struct drm_device *dev)
3326
{
3337
{
3327
	struct intel_crtc *crtc;
3338
	struct intel_crtc *crtc;
3328
 
3339
 
3329
	/* Note that we don't need to be called with mode_config.lock here
3340
	/* Note that we don't need to be called with mode_config.lock here
3330
	 * as our list of CRTC objects is static for the lifetime of the
3341
	 * as our list of CRTC objects is static for the lifetime of the
3331
	 * device and so cannot disappear as we iterate. Similarly, we can
3342
	 * device and so cannot disappear as we iterate. Similarly, we can
3332
	 * happily treat the predicates as racy, atomic checks as userspace
3343
	 * happily treat the predicates as racy, atomic checks as userspace
3333
	 * cannot claim and pin a new fb without at least acquiring the
3344
	 * cannot claim and pin a new fb without at least acquiring the
3334
	 * struct_mutex and so serialising with us.
3345
	 * struct_mutex and so serialising with us.
3335
	 */
3346
	 */
3336
	for_each_intel_crtc(dev, crtc) {
3347
	for_each_intel_crtc(dev, crtc) {
3337
		if (atomic_read(&crtc->unpin_work_count) == 0)
3348
		if (atomic_read(&crtc->unpin_work_count) == 0)
3338
			continue;
3349
			continue;
3339
 
3350
 
3340
		if (crtc->unpin_work)
3351
		if (crtc->unpin_work)
3341
			intel_wait_for_vblank(dev, crtc->pipe);
3352
			intel_wait_for_vblank(dev, crtc->pipe);
3342
 
3353
 
3343
		return true;
3354
		return true;
3344
	}
3355
	}
3345
 
3356
 
3346
	return false;
3357
	return false;
3347
}
3358
}
3348
 
3359
 
3349
#if 0
3360
#if 0
3350
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3361
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3351
{
3362
{
3352
	struct drm_device *dev = crtc->dev;
3363
	struct drm_device *dev = crtc->dev;
3353
	struct drm_i915_private *dev_priv = dev->dev_private;
3364
	struct drm_i915_private *dev_priv = dev->dev_private;
3354
 
3365
 
3355
	if (crtc->primary->fb == NULL)
3366
	if (crtc->primary->fb == NULL)
3356
		return;
3367
		return;
3357
 
3368
 
3358
	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
3369
	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
3359
 
3370
 
3360
	WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
3371
	WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
3361
				   !intel_crtc_has_pending_flip(crtc),
3372
				   !intel_crtc_has_pending_flip(crtc),
3362
				   60*HZ) == 0);
3373
				   60*HZ) == 0);
3363
 
3374
 
3364
	mutex_lock(&dev->struct_mutex);
3375
	mutex_lock(&dev->struct_mutex);
3365
	intel_finish_fb(crtc->primary->fb);
3376
	intel_finish_fb(crtc->primary->fb);
3366
	mutex_unlock(&dev->struct_mutex);
3377
	mutex_unlock(&dev->struct_mutex);
3367
}
3378
}
3368
#endif
3379
#endif
3369
 
3380
 
3370
/* Program iCLKIP clock to the desired frequency */
3381
/* Program iCLKIP clock to the desired frequency */
3371
static void lpt_program_iclkip(struct drm_crtc *crtc)
3382
static void lpt_program_iclkip(struct drm_crtc *crtc)
3372
{
3383
{
3373
	struct drm_device *dev = crtc->dev;
3384
	struct drm_device *dev = crtc->dev;
3374
	struct drm_i915_private *dev_priv = dev->dev_private;
3385
	struct drm_i915_private *dev_priv = dev->dev_private;
3375
	int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
3386
	int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
3376
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
3387
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
3377
	u32 temp;
3388
	u32 temp;
3378
 
3389
 
3379
	mutex_lock(&dev_priv->dpio_lock);
3390
	mutex_lock(&dev_priv->dpio_lock);
3380
 
3391
 
3381
	/* It is necessary to ungate the pixclk gate prior to programming
3392
	/* It is necessary to ungate the pixclk gate prior to programming
3382
	 * the divisors, and gate it back when it is done.
3393
	 * the divisors, and gate it back when it is done.
3383
	 */
3394
	 */
3384
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
3395
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
3385
 
3396
 
3386
	/* Disable SSCCTL */
3397
	/* Disable SSCCTL */
3387
	intel_sbi_write(dev_priv, SBI_SSCCTL6,
3398
	intel_sbi_write(dev_priv, SBI_SSCCTL6,
3388
			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
3399
			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
3389
				SBI_SSCCTL_DISABLE,
3400
				SBI_SSCCTL_DISABLE,
3390
			SBI_ICLK);
3401
			SBI_ICLK);
3391
 
3402
 
3392
	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
3403
	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
3393
	if (clock == 20000) {
3404
	if (clock == 20000) {
3394
		auxdiv = 1;
3405
		auxdiv = 1;
3395
		divsel = 0x41;
3406
		divsel = 0x41;
3396
		phaseinc = 0x20;
3407
		phaseinc = 0x20;
3397
	} else {
3408
	} else {
3398
		/* The iCLK virtual clock root frequency is in MHz,
3409
		/* The iCLK virtual clock root frequency is in MHz,
3399
		 * but the adjusted_mode->crtc_clock is in KHz. To get the
3410
		 * but the adjusted_mode->crtc_clock is in KHz. To get the
3400
		 * divisors, it is necessary to divide one by another, so we
3411
		 * divisors, it is necessary to divide one by another, so we
3401
		 * convert the virtual clock precision to KHz here for higher
3412
		 * convert the virtual clock precision to KHz here for higher
3402
		 * precision.
3413
		 * precision.
3403
		 */
3414
		 */
3404
		u32 iclk_virtual_root_freq = 172800 * 1000;
3415
		u32 iclk_virtual_root_freq = 172800 * 1000;
3405
		u32 iclk_pi_range = 64;
3416
		u32 iclk_pi_range = 64;
3406
		u32 desired_divisor, msb_divisor_value, pi_value;
3417
		u32 desired_divisor, msb_divisor_value, pi_value;
3407
 
3418
 
3408
		desired_divisor = (iclk_virtual_root_freq / clock);
3419
		desired_divisor = (iclk_virtual_root_freq / clock);
3409
		msb_divisor_value = desired_divisor / iclk_pi_range;
3420
		msb_divisor_value = desired_divisor / iclk_pi_range;
3410
		pi_value = desired_divisor % iclk_pi_range;
3421
		pi_value = desired_divisor % iclk_pi_range;
3411
 
3422
 
3412
		auxdiv = 0;
3423
		auxdiv = 0;
3413
		divsel = msb_divisor_value - 2;
3424
		divsel = msb_divisor_value - 2;
3414
		phaseinc = pi_value;
3425
		phaseinc = pi_value;
3415
	}
3426
	}
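	/*
	 * Illustrative worked example (editorial, not in the original source):
	 * for a 108000 kHz clock, desired_divisor = 172800000 / 108000 = 1600,
	 * so msb_divisor_value = 1600 / 64 = 25 and pi_value = 1600 % 64 = 0,
	 * giving divsel = 23, phaseinc = 0 and auxdiv = 0.
	 */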
3416
 
3427
 
3417
	/* This should not happen with any sane values */
3428
	/* This should not happen with any sane values */
3418
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
3429
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
3419
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
3430
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
3420
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
3431
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
3421
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);
3432
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);
3422
 
3433
 
3423
	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
3434
	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
3424
			clock,
3435
			clock,
3425
			auxdiv,
3436
			auxdiv,
3426
			divsel,
3437
			divsel,
3427
			phasedir,
3438
			phasedir,
3428
			phaseinc);
3439
			phaseinc);
3429
 
3440
 
3430
	/* Program SSCDIVINTPHASE6 */
3441
	/* Program SSCDIVINTPHASE6 */
3431
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3442
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3432
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
3443
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
3433
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
3444
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
3434
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
3445
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
3435
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
3446
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
3436
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
3447
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
3437
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
3448
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
3438
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
3449
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
3439
 
3450
 
3440
	/* Program SSCAUXDIV */
3451
	/* Program SSCAUXDIV */
3441
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3452
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3442
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
3453
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
3443
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
3454
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
3444
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
3455
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
3445
 
3456
 
3446
	/* Enable modulator and associated divider */
3457
	/* Enable modulator and associated divider */
3447
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3458
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3448
	temp &= ~SBI_SSCCTL_DISABLE;
3459
	temp &= ~SBI_SSCCTL_DISABLE;
3449
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3460
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3450
 
3461
 
3451
	/* Wait for initialization time */
3462
	/* Wait for initialization time */
3452
	udelay(24);
3463
	udelay(24);
3453
 
3464
 
3454
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
3465
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
3455
 
3466
 
3456
	mutex_unlock(&dev_priv->dpio_lock);
3467
	mutex_unlock(&dev_priv->dpio_lock);
3457
}
3468
}
3458
 
3469
 
3459
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}

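/*
 * Set FDI_BC_BIFURCATION_SELECT in SOUTH_CHICKEN1 so the FDI B and C links
 * can share lanes. The WARN_ONs document that both FDI receivers must still
 * be disabled when the bit is flipped.
 */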
static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (temp & FDI_BC_BIFURCATION_SELECT)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp |= FDI_BC_BIFURCATION_SELECT;
	DRM_DEBUG_KMS("enabling fdi C rx\n");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}

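/*
 * Decide per pipe whether B/C lane bifurcation is needed: pipe A never needs
 * it, pipe B only when it uses two FDI lanes or fewer, and pipe C always does.
 */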
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	switch (intel_crtc->pipe) {
	case PIPE_A:
		break;
	case PIPE_B:
		if (intel_crtc->config.fdi_lanes > 2)
			WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
		else
			cpt_enable_fdi_bc_bifurcation(dev);

		break;
	case PIPE_C:
		cpt_enable_fdi_bc_bifurcation(dev);

		break;
	default:
		BUG();
	}
}

/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev))
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev)) {
		u32 sel;

		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (intel_crtc->config.shared_dpll == DPLL_ID_PCH_PLL_B)
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already uses the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(intel_crtc);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) &&
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= (TRANS_DP_OUTPUT_ENABLE |
			 TRANS_DP_ENH_FRAMING);
		temp |= bpc << 9; /* same format but at 11:9 */

		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		switch (intel_trans_dp_port_sel(crtc)) {
		case PCH_DP_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			BUG();
		}

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(dev_priv, pipe);
}

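/*
 * LPT variant of the PCH enable sequence: program iCLKIP, copy the
 * transcoder timings and enable the (single) PCH transcoder.
 */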
static void lpt_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);

	lpt_program_iclkip(crtc);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}

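/*
 * Drop the CRTC's reference on its shared DPLL and detach the PLL from the
 * CRTC configuration. The PLL is expected to be off and inactive once the
 * last reference goes away.
 */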
void intel_put_shared_dpll(struct intel_crtc *crtc)
{
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	if (pll == NULL)
		return;

	if (pll->refcount == 0) {
		WARN(1, "bad %s refcount\n", pll->name);
		return;
	}

	if (--pll->refcount == 0) {
		WARN_ON(pll->on);
		WARN_ON(pll->active);
	}

	crtc->config.shared_dpll = DPLL_ID_PRIVATE;
}

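/*
 * Pick a shared DPLL for the CRTC. IBX has a fixed pipe->PLL mapping;
 * otherwise prefer an already-used PLL whose programmed state matches and
 * fall back to any unused PLL.
 */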
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
	enum intel_dpll_id i;

	if (pll) {
		DRM_DEBUG_KMS("CRTC:%d dropping existing %s\n",
			      crtc->base.base.id, pll->name);
		intel_put_shared_dpll(crtc);
	}

	if (HAS_PCH_IBX(dev_priv->dev)) {
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
		i = (enum intel_dpll_id) crtc->pipe;
		pll = &dev_priv->shared_dplls[i];

		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
			      crtc->base.base.id, pll->name);

		WARN_ON(pll->refcount);

		goto found;
	}

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		pll = &dev_priv->shared_dplls[i];

		/* Only want to check enabled timings first */
		if (pll->refcount == 0)
			continue;

		if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
			   sizeof(pll->hw_state)) == 0) {
			DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, active %d)\n",
				      crtc->base.base.id,
				      pll->name, pll->refcount, pll->active);

			goto found;
		}
	}

	/* OK, no matching timings, maybe there's a free one? */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		pll = &dev_priv->shared_dplls[i];
		if (pll->refcount == 0) {
			DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
				      crtc->base.base.id, pll->name);
			goto found;
		}
	}

	return NULL;

found:
	if (pll->refcount == 0)
		pll->hw_state = crtc->config.dpll_hw_state;

	crtc->config.shared_dpll = i;
	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
			 pipe_name(crtc->pipe));

	pll->refcount++;

	return pll;
}

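/*
 * Sanity check after a CPT mode set: make sure the pipe's scanline counter
 * is still advancing, retrying once before reporting the pipe as stuck.
 */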
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}

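/*
 * Enable the PCH panel fitter with the window position and size computed at
 * mode set time, if the pipe configuration asks for it.
 */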
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	if (crtc->config.pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
						 PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size);
	}
}

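/* Restore every legacy (sprite) plane attached to this CRTC's pipe. */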
static void intel_enable_planes(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	struct drm_plane *plane;
	struct intel_plane *intel_plane;

	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
		intel_plane = to_intel_plane(plane);
		if (intel_plane->pipe == pipe)
			intel_plane_restore(&intel_plane->base);
	}
}

static void intel_disable_planes(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	struct drm_plane *plane;
	struct intel_plane *intel_plane;

	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
		intel_plane = to_intel_plane(plane);
		if (intel_plane->pipe == pipe)
			intel_plane_disable(&intel_plane->base);
	}
}

void hsw_enable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config.ips_enabled)
		return;

	/* We can only enable IPS after we enable a plane and wait for a vblank */
	intel_wait_for_vblank(dev, crtc->pipe);

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}

void hsw_disable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config.ips_enabled)
		return;

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
		if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev, crtc->pipe);
}

/** Loads the palette/gamma unit for the CRTC with the prepared values */
static void intel_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	int palreg = PALETTE(pipe);
	int i;
	bool reenable_ips = false;

	/* The clocks have to be on to load the palette. */
	if (!crtc->enabled || !intel_crtc->active)
		return;

	if (!HAS_PCH_SPLIT(dev_priv->dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	}

	/* use legacy palette for Ironlake */
	if (!HAS_GMCH_DISPLAY(dev))
		palreg = LGC_PALETTE(pipe);

	/* Workaround: Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 */
	if (IS_HASWELL(dev) && intel_crtc->config.ips_enabled &&
	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
	     GAMMA_MODE_MODE_SPLIT)) {
		hsw_disable_ips(intel_crtc);
		reenable_ips = true;
	}

	for (i = 0; i < 256; i++) {
		I915_WRITE(palreg + 4 * i,
			   (intel_crtc->lut_r[i] << 16) |
			   (intel_crtc->lut_g[i] << 8) |
			   intel_crtc->lut_b[i]);
	}

	if (reenable_ips)
		hsw_enable_ips(intel_crtc);
}

static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
	if (!enable && intel_crtc->overlay) {
		struct drm_device *dev = intel_crtc->base.dev;
		struct drm_i915_private *dev_priv = dev->dev_private;

		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.interruptible = false;
		dev_priv->mm.interruptible = true;
		mutex_unlock(&dev->struct_mutex);
	}

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}

static void intel_crtc_enable_planes(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	drm_vblank_on(dev, pipe);

	intel_enable_primary_hw_plane(dev_priv, plane, pipe);
	intel_enable_planes(crtc);
	intel_crtc_update_cursor(crtc, true);
	intel_crtc_dpms_overlay(intel_crtc, true);

	hsw_enable_ips(intel_crtc);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);
}

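/*
 * Tear-down counterpart of intel_crtc_enable_planes(): FBC, IPS, overlay,
 * cursor, sprite planes and finally the primary plane are disabled before
 * vblanks are turned off for the pipe.
 */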
static void intel_crtc_disable_planes(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	if (dev_priv->fbc.plane == plane)
		intel_disable_fbc(dev);

	hsw_disable_ips(intel_crtc);

	intel_crtc_dpms_overlay(intel_crtc, false);
	intel_crtc_update_cursor(crtc, false);
	intel_disable_planes(crtc);
	intel_disable_primary_hw_plane(dev_priv, plane, pipe);
	drm_vblank_off(dev, pipe);
}

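/*
 * Full enable sequence for an Ironlake-style (PCH) pipe: timings, PLLs and
 * pipeconf are programmed first, then the encoders' pre_enable hooks run,
 * FDI, the panel fitter and the LUT are set up, the pipe is enabled, and
 * finally the PCH resources, encoders and planes follow.
 */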
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	enum plane plane = intel_crtc->plane;

	WARN_ON(!crtc->enabled);

	if (intel_crtc->active)
		return;

	if (intel_crtc->config.has_pch_encoder)
		intel_prepare_shared_dpll(intel_crtc);

	if (intel_crtc->config.has_dp_encoder)
		intel_dp_set_m_n(intel_crtc);

	intel_set_pipe_timings(intel_crtc);

	if (intel_crtc->config.has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
					     &intel_crtc->config.fdi_m_n);
	}

	ironlake_set_pipeconf(crtc);

	/* Set up the display plane register */
	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
	POSTING_READ(DSPCNTR(plane));

	dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
					       crtc->x, crtc->y);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	if (intel_crtc->config.has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config.has_pch_encoder)
		ironlake_pch_enable(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	if (HAS_PCH_CPT(dev))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	intel_crtc_enable_planes(crtc);
}

/* IPS only exists on ULT machines and is tied to pipe A. */
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
{
	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
}

/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_crtc *crtc_it, *other_active_crtc = NULL;

	/* We want to get the other_active_crtc only if there's only 1 other
	 * active crtc. */
	for_each_intel_crtc(dev, crtc_it) {
		if (!crtc_it->active || crtc_it == crtc)
			continue;

		if (other_active_crtc)
			return;

		other_active_crtc = crtc_it;
	}
	if (!other_active_crtc)
		return;

	intel_wait_for_vblank(dev, other_active_crtc->pipe);
	intel_wait_for_vblank(dev, other_active_crtc->pipe);
}

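/*
 * Haswell pipe enable: similar ordering to the Ironlake path, but routed
 * through the DDI blocks (pipe clock, transcoder function), with LPT PCH
 * handling and optional DP MST payload allocation.
 */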
static void haswell_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	enum plane plane = intel_crtc->plane;

	WARN_ON(!crtc->enabled);

	if (intel_crtc->active)
		return;

	if (intel_crtc_to_shared_dpll(intel_crtc))
		intel_enable_shared_dpll(intel_crtc);

	if (intel_crtc->config.has_dp_encoder)
		intel_dp_set_m_n(intel_crtc);

	intel_set_pipe_timings(intel_crtc);

	if (intel_crtc->config.has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
					     &intel_crtc->config.fdi_m_n);
	}

	haswell_set_pipeconf(crtc);

	intel_set_pipe_csc(crtc);

	/* Set up the display plane register */
	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
	POSTING_READ(DSPCNTR(plane));

	dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
					       crtc->x, crtc->y);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	if (intel_crtc->config.has_pch_encoder) {
		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
		dev_priv->display.fdi_link_train(crtc);
	}

	intel_ddi_enable_pipe_clock(intel_crtc);

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_ddi_set_pipe_settings(crtc);
	intel_ddi_enable_transcoder_func(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config.has_pch_encoder)
		lpt_pch_enable(crtc);

	if (intel_crtc->config.dp_encoder_is_mst)
		intel_ddi_set_vc_payload_alloc(crtc, true);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		encoder->enable(encoder);
		intel_opregion_notify_encoder(encoder, true);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	haswell_mode_set_planes_workaround(intel_crtc);
	intel_crtc_enable_planes(crtc);
}

static void ironlake_pfit_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right. */
	if (crtc->config.pch_pfit.enabled) {
		I915_WRITE(PF_CTL(pipe), 0);
		I915_WRITE(PF_WIN_POS(pipe), 0);
		I915_WRITE(PF_WIN_SZ(pipe), 0);
	}
}

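/*
 * Reverse of ironlake_crtc_enable(): planes and encoders go first, then the
 * pipe, panel fitter, FDI and PCH transcoder, and finally the shared DPLL
 * and FDI PLL are released.
 */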
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	if (!intel_crtc->active)
		return;

	intel_crtc_disable_planes(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	if (intel_crtc->config.has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev, pipe, false);

	intel_disable_pipe(dev_priv, pipe);

	ironlake_pfit_disable(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (intel_crtc->config.has_pch_encoder) {
		ironlake_fdi_disable(crtc);

		ironlake_disable_pch_transcoder(dev_priv, pipe);
		intel_set_pch_fifo_underrun_reporting(dev, pipe, true);

		if (HAS_PCH_CPT(dev)) {
			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		/* disable PCH DPLL */
		intel_disable_shared_dpll(intel_crtc);

		ironlake_fdi_pll_disable(intel_crtc);
	}

	intel_crtc->active = false;
	intel_update_watermarks(crtc);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);
}

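/*
 * Reverse of haswell_crtc_enable(): planes, encoders and the pipe are shut
 * down before the DDI transcoder function, panel fitter and pipe clock, and
 * (for PCH encoders) the LPT transcoder and FDI.
 */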
static void haswell_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;

	if (!intel_crtc->active)
		return;

	intel_crtc_disable_planes(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		intel_opregion_notify_encoder(encoder, false);
		encoder->disable(encoder);
	}

	if (intel_crtc->config.has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
	intel_disable_pipe(dev_priv, pipe);

	if (intel_crtc->config.dp_encoder_is_mst)
		intel_ddi_set_vc_payload_alloc(crtc, false);

	intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

	ironlake_pfit_disable(intel_crtc);

	intel_ddi_disable_pipe_clock(intel_crtc);

	if (intel_crtc->config.has_pch_encoder) {
		lpt_disable_pch_transcoder(dev_priv);
		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
		intel_ddi_fdi_disable(crtc);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	intel_crtc->active = false;
	intel_update_watermarks(crtc);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	if (intel_crtc_to_shared_dpll(intel_crtc))
		intel_disable_shared_dpll(intel_crtc);
}

static void ironlake_crtc_off(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	intel_put_shared_dpll(intel_crtc);
}

static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_config *pipe_config = &crtc->config;

	if (!crtc->config.gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc->pipe);

	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}

static enum intel_display_power_domain port_to_power_domain(enum port port)
4323
static enum intel_display_power_domain port_to_power_domain(enum port port)
4314
{
4324
{
4315
	switch (port) {
4325
	switch (port) {
4316
	case PORT_A:
4326
	case PORT_A:
4317
		return POWER_DOMAIN_PORT_DDI_A_4_LANES;
4327
		return POWER_DOMAIN_PORT_DDI_A_4_LANES;
4318
	case PORT_B:
4328
	case PORT_B:
4319
		return POWER_DOMAIN_PORT_DDI_B_4_LANES;
4329
		return POWER_DOMAIN_PORT_DDI_B_4_LANES;
4320
	case PORT_C:
4330
	case PORT_C:
4321
		return POWER_DOMAIN_PORT_DDI_C_4_LANES;
4331
		return POWER_DOMAIN_PORT_DDI_C_4_LANES;
4322
	case PORT_D:
4332
	case PORT_D:
4323
		return POWER_DOMAIN_PORT_DDI_D_4_LANES;
4333
		return POWER_DOMAIN_PORT_DDI_D_4_LANES;
4324
	default:
4334
	default:
4325
		WARN_ON_ONCE(1);
4335
		WARN_ON_ONCE(1);
4326
		return POWER_DOMAIN_PORT_OTHER;
4336
		return POWER_DOMAIN_PORT_OTHER;
4327
	}
4337
	}
4328
}
4338
}
4329
 
4339
 
4330
#define for_each_power_domain(domain, mask)				\
4340
#define for_each_power_domain(domain, mask)				\
4331
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
4341
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
4332
		if ((1 << (domain)) & (mask))
4342
		if ((1 << (domain)) & (mask))
4333
 
4343
 
4334
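/*
 * Maps an encoder to the power domain of the port driving it. Note that the
 * INTEL_OUTPUT_UNKNOWN case below has no break and so falls through to the
 * DisplayPort/HDMI/eDP handling; the WARN on non-DDI platforms suggests the
 * fall-through is intentional for DDI outputs whose exact type is not yet
 * known.
 */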
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
		/* Only DDI platforms should ever use this output type */
		WARN_ON_ONCE(!HAS_DDI(dev));
	case INTEL_OUTPUT_DISPLAYPORT:
	case INTEL_OUTPUT_HDMI:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_ANALOG:
		return POWER_DOMAIN_PORT_CRT;
	case INTEL_OUTPUT_DSI:
		return POWER_DOMAIN_PORT_DSI;
	default:
		return POWER_DOMAIN_PORT_OTHER;
	}
}

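/*
 * The mask returned below is a bitmask over enum intel_display_power_domain:
 * the pipe and its CPU transcoder are always included, the panel fitter
 * domain is added when the PCH pfit is enabled (or forced through), and each
 * encoder on the CRTC contributes its port domain via
 * intel_display_port_power_domain(). for_each_power_domain() then walks the
 * set bits.
 */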
static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *intel_encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	unsigned long mask;
	enum transcoder transcoder;

	transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);

	mask = BIT(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
	if (intel_crtc->config.pch_pfit.enabled ||
	    intel_crtc->config.pch_pfit.force_thru)
		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		mask |= BIT(intel_display_port_power_domain(intel_encoder));

	return mask;
}

void intel_display_set_init_power(struct drm_i915_private *dev_priv,
				  bool enable)
{
	if (dev_priv->power_domains.init_power_on == enable)
		return;

	if (enable)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	else
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	dev_priv->power_domains.init_power_on = enable;
}

static void modeset_update_crtc_power_domains(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
	struct intel_crtc *crtc;

	/*
	 * First get all needed power domains, then put all unneeded, to avoid
	 * any unnecessary toggling of the power wells.
	 */
	for_each_intel_crtc(dev, crtc) {
		enum intel_display_power_domain domain;

		if (!crtc->base.enabled)
			continue;

		pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);

		for_each_power_domain(domain, pipe_domains[crtc->pipe])
			intel_display_power_get(dev_priv, domain);
	}

	for_each_intel_crtc(dev, crtc) {
		enum intel_display_power_domain domain;

		for_each_power_domain(domain, crtc->enabled_power_domains)
			intel_display_power_put(dev_priv, domain);

		crtc->enabled_power_domains = pipe_domains[crtc->pipe];
	}

	intel_display_set_init_power(dev_priv, false);
}

/* returns HPLL frequency in kHz */
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	mutex_lock(&dev_priv->dpio_lock);
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;
	mutex_unlock(&dev_priv->dpio_lock);

	return vco_freq[hpll_freq] * 1000;
}

static void vlv_update_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz",
			 dev_priv->vlv_cdclk_freq);

	/*
	 * Program the gmbus_freq based on the cdclk frequency.
	 * BSpec erroneously claims we should aim for 4MHz, but
	 * in fact 1MHz is the correct frequency.
	 */
	I915_WRITE(GMBUSFREQ_VLV, dev_priv->vlv_cdclk_freq);
}

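/*
 * Illustrative arithmetic only (values depend on the fused HPLL frequency):
 * with a 1600 MHz HPLL, valleyview_get_vco() returns 1600000 kHz, so for a
 * 400000 kHz target the CCK divider programmed in valleyview_set_cdclk() is
 * DIV_ROUND_CLOSEST(2 * 1600000, 400000) - 1 = 7.
 */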
/* Adjust CDclk dividers to allow high res or save power if possible */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);

	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
		cmd = 2;
	else if (cdclk == 266667)
		cmd = 1;
	else
		cmd = 0;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK;
	val |= (cmd << DSPFREQGUAR_SHIFT);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	if (cdclk == 400000) {
		u32 divider, vco;

		vco = valleyview_get_vco(dev_priv);
		divider = DIV_ROUND_CLOSEST(vco << 1, cdclk) - 1;

		mutex_lock(&dev_priv->dpio_lock);
		/* adjust cdclk divider */
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
		val &= ~DISPLAY_FREQUENCY_VALUES;
		val |= divider;
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);

		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
			      DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
			     50))
			DRM_ERROR("timed out waiting for CDclk change\n");
		mutex_unlock(&dev_priv->dpio_lock);
	}

	mutex_lock(&dev_priv->dpio_lock);
	/* adjust self-refresh exit latency value */
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
	val &= ~0x7f;

	/*
	 * For high bandwidth configs, we set a higher latency in the bunit
	 * so that the core display fetch happens in time to avoid underruns.
	 */
	if (cdclk == 400000)
		val |= 4500 / 250; /* 4.5 usec */
	else
		val |= 3000 / 250; /* 3.0 usec */
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
	mutex_unlock(&dev_priv->dpio_lock);

	vlv_update_cdclk(dev);
}

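/*
 * Illustrative numbers for the 90% check in valleyview_calc_cdclk() below:
 * the 266667 kHz bin covers pixel clocks up to 266667 * 9 / 10 = 240000 kHz,
 * the 320/333 MHz bin up to freq_320 * 9 / 10 (288000 or ~300000 kHz), and
 * anything above that selects 400 MHz.
 */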
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
				 int max_pixclk)
{
	int vco = valleyview_get_vco(dev_priv);
	int freq_320 = (vco << 1) % 320000 != 0 ? 333333 : 320000;

	/*
	 * Really only a few cases to deal with, as only 4 CDclks are supported:
	 *   200MHz
	 *   267MHz
	 *   320/333MHz (depends on HPLL freq)
	 *   400MHz
	 * So we check to see whether we're above 90% of the lower bin and
	 * adjust if needed.
	 *
	 * We seem to get an unstable or solid color picture at 200MHz.
	 * Not sure what's wrong. For now use 200MHz only when all pipes
	 * are off.
	 */
	if (max_pixclk > freq_320*9/10)
		return 400000;
	else if (max_pixclk > 266667*9/10)
		return freq_320;
	else if (max_pixclk > 0)
		return 266667;
	else
		return 200000;
}

/* compute the max pixel clock for new configuration */
static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *intel_crtc;
	int max_pixclk = 0;

	for_each_intel_crtc(dev, intel_crtc) {
		if (intel_crtc->new_enabled)
			max_pixclk = max(max_pixclk,
					 intel_crtc->new_config->adjusted_mode.crtc_clock);
	}

	return max_pixclk;
}

static void valleyview_modeset_global_pipes(struct drm_device *dev,
					    unsigned *prepare_pipes)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	int max_pixclk = intel_mode_max_pixclk(dev_priv);

	if (valleyview_calc_cdclk(dev_priv, max_pixclk) ==
	    dev_priv->vlv_cdclk_freq)
		return;

	/* disable/enable all currently active pipes while we change cdclk */
	for_each_intel_crtc(dev, intel_crtc)
		if (intel_crtc->base.enabled)
			*prepare_pipes |= (1 << intel_crtc->pipe);
}

static void valleyview_modeset_global_resources(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int max_pixclk = intel_mode_max_pixclk(dev_priv);
	int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);

	if (req_cdclk != dev_priv->vlv_cdclk_freq)
		valleyview_set_cdclk(dev, req_cdclk);
	modeset_update_crtc_power_domains(dev);
}

static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	bool is_dsi;
	u32 dspcntr;

	WARN_ON(!crtc->enabled);

	if (intel_crtc->active)
		return;

	is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);

	if (!is_dsi && !IS_CHERRYVIEW(dev))
		vlv_prepare_pll(intel_crtc);

	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;

	if (intel_crtc->config.has_dp_encoder)
		intel_dp_set_m_n(intel_crtc);

	intel_set_pipe_timings(intel_crtc);

	/* pipesrc and dspsize control the size that is scaled from,
	 * which should always be the user's requested size.
	 */
	I915_WRITE(DSPSIZE(plane),
		   ((intel_crtc->config.pipe_src_h - 1) << 16) |
		   (intel_crtc->config.pipe_src_w - 1));
	I915_WRITE(DSPPOS(plane), 0);

	i9xx_set_pipeconf(intel_crtc);

	I915_WRITE(DSPCNTR(plane), dspcntr);
	POSTING_READ(DSPCNTR(plane));

	dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
					       crtc->x, crtc->y);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);

	if (!is_dsi) {
		if (IS_CHERRYVIEW(dev))
			chv_enable_pll(intel_crtc);
		else
			vlv_enable_pll(intel_crtc);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	intel_crtc_enable_planes(crtc);

	/* Underruns don't raise interrupts, so check manually. */
	i9xx_check_fifo_underruns(dev);
}

static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(FP0(crtc->pipe), crtc->config.dpll_hw_state.fp0);
	I915_WRITE(FP1(crtc->pipe), crtc->config.dpll_hw_state.fp1);
}

static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 dspcntr;

	WARN_ON(!crtc->enabled);

	if (intel_crtc->active)
		return;

	i9xx_set_pll_dividers(intel_crtc);

	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;

	if (pipe == 0)
		dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
	else
		dspcntr |= DISPPLANE_SEL_PIPE_B;

	if (intel_crtc->config.has_dp_encoder)
		intel_dp_set_m_n(intel_crtc);

	intel_set_pipe_timings(intel_crtc);

	/* pipesrc and dspsize control the size that is scaled from,
	 * which should always be the user's requested size.
	 */
	I915_WRITE(DSPSIZE(plane),
		   ((intel_crtc->config.pipe_src_h - 1) << 16) |
		   (intel_crtc->config.pipe_src_w - 1));
	I915_WRITE(DSPPOS(plane), 0);

	i9xx_set_pipeconf(intel_crtc);

	I915_WRITE(DSPCNTR(plane), dspcntr);
	POSTING_READ(DSPCNTR(plane));

	dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
					       crtc->x, crtc->y);

	intel_crtc->active = true;

	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_enable_pll(intel_crtc);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	intel_crtc_enable_planes(crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So don't enable underrun reporting before at least some planes
	 * are enabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);

	/* Underruns don't raise interrupts, so check manually. */
	i9xx_check_fifo_underruns(dev);
}

static void i9xx_pfit_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config.gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
			 I915_READ(PFIT_CONTROL));
	I915_WRITE(PFIT_CONTROL, 0);
}

static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	if (!intel_crtc->active)
		return;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	intel_set_memory_cxsr(dev_priv, false);
	intel_crtc_disable_planes(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 * We also need to wait on all gmch platforms because of the
	 * self-refresh mode constraint explained above.
	 */
	intel_wait_for_vblank(dev, pipe);

	intel_disable_pipe(dev_priv, pipe);

	i9xx_pfit_disable(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (!intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(dev_priv, pipe);
	}

	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);

	intel_crtc->active = false;
	intel_update_watermarks(crtc);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);
}

static void i9xx_crtc_off(struct drm_crtc *crtc)
{
}

static void intel_crtc_update_sarea(struct drm_crtc *crtc,
				    bool enabled)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_master_private *master_priv;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;


#if 0
	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (!master_priv->sarea_priv)
		return;

	switch (pipe) {
	case 0:
		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
		master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
		break;
	case 1:
		master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
		master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
		break;
	default:
		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
		break;
	}
#endif
}

/* Master function to enable/disable CRTC and corresponding power wells */
void intel_crtc_control(struct drm_crtc *crtc, bool enable)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum intel_display_power_domain domain;
	unsigned long domains;

	if (enable) {
		if (!intel_crtc->active) {
			domains = get_crtc_power_domains(crtc);
			for_each_power_domain(domain, domains)
				intel_display_power_get(dev_priv, domain);
			intel_crtc->enabled_power_domains = domains;

			dev_priv->display.crtc_enable(crtc);
		}
	} else {
		if (intel_crtc->active) {
			dev_priv->display.crtc_disable(crtc);

			domains = intel_crtc->enabled_power_domains;
			for_each_power_domain(domain, domains)
				intel_display_power_put(dev_priv, domain);
			intel_crtc->enabled_power_domains = 0;
		}
	}
}

/**
 * Sets the power management mode of the pipe and plane.
 */
void intel_crtc_update_dpms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *intel_encoder;
	bool enable = false;

	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		enable |= intel_encoder->connectors_active;

	intel_crtc_control(crtc, enable);

	intel_crtc_update_sarea(crtc, enable);
}

static void intel_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_connector *connector;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *old_obj = intel_fb_obj(crtc->primary->fb);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	/* crtc should still be enabled when we disable it. */
	WARN_ON(!crtc->enabled);

	dev_priv->display.crtc_disable(crtc);
	intel_crtc_update_sarea(crtc, false);
	dev_priv->display.off(crtc);

	if (crtc->primary->fb) {
		mutex_lock(&dev->struct_mutex);
		intel_unpin_fb_obj(old_obj);
		i915_gem_track_fb(old_obj, NULL,
				  INTEL_FRONTBUFFER_PRIMARY(pipe));
		mutex_unlock(&dev->struct_mutex);
		crtc->primary->fb = NULL;
	}

	/* Update computed state. */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		if (!connector->encoder || !connector->encoder->crtc)
			continue;

		if (connector->encoder->crtc != crtc)
			continue;

		connector->dpms = DRM_MODE_DPMS_OFF;
		to_intel_encoder(connector->encoder)->connectors_active = false;
	}
}

void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

/* Simple dpms helper for encoders with just one connector, no cloning and only
 * one kind of off state. It clamps all !ON modes to fully OFF and changes the
 * state of the entire output pipe. */
static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
{
	if (mode == DRM_MODE_DPMS_ON) {
		encoder->connectors_active = true;

		intel_crtc_update_dpms(encoder->base.crtc);
	} else {
		encoder->connectors_active = false;

		intel_crtc_update_dpms(encoder->base.crtc);
	}
}

/* Cross check the actual hw state with our own modeset state tracking (and its
 * internal consistency). */
static void intel_connector_check_state(struct intel_connector *connector)
{
	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;
		struct drm_crtc *crtc;
		bool encoder_enabled;
		enum pipe pipe;

		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
			      connector->base.base.id,
			      connector->base.name);

		/* there is no real hw state for MST connectors */
		if (connector->mst_port)
			return;

		WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
		     "wrong connector dpms state\n");
		WARN(connector->base.encoder != &encoder->base,
		     "active connector not linked to encoder\n");

		if (encoder) {
			WARN(!encoder->connectors_active,
			     "encoder->connectors_active not set\n");

			encoder_enabled = encoder->get_hw_state(encoder, &pipe);
			WARN(!encoder_enabled, "encoder not enabled\n");
			if (WARN_ON(!encoder->base.crtc))
				return;

			crtc = encoder->base.crtc;

			WARN(!crtc->enabled, "crtc not enabled\n");
			WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
			WARN(pipe != to_intel_crtc(crtc)->pipe,
			     "encoder active on the wrong pipe\n");
		}
	}
}

/* Even simpler default implementation, if there's really no special case to
 * consider. */
void intel_connector_dpms(struct drm_connector *connector, int mode)
{
	/* All the simple cases only support two dpms states. */
	if (mode != DRM_MODE_DPMS_ON)
		mode = DRM_MODE_DPMS_OFF;

	if (mode == connector->dpms)
		return;

	connector->dpms = mode;

	/* Only need to change hw state when actually enabled */
	if (connector->encoder)
		intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);

	intel_modeset_check_state(connector->dev);
}

/* Simple connector->get_hw_state implementation for encoders that support only
 * one connector and no cloning and hence the encoder state determines the state
 * of the connector. */
bool intel_connector_get_hw_state(struct intel_connector *connector)
{
	enum pipe pipe = 0;
	struct intel_encoder *encoder = connector->encoder;

	return encoder->get_hw_state(encoder, &pipe);
}

static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				     struct intel_crtc_config *pipe_config)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *pipe_B_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return false;
	}

	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return false;
		} else {
			return true;
		}
	}

	if (INTEL_INFO(dev)->num_pipes == 2)
		return true;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return true;
	case PIPE_B:
		if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
		    pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return false;
		}
		return true;
	case PIPE_C:
		if (!pipe_has_enabled_pch(pipe_B_crtc) ||
		    pipe_B_crtc->config.fdi_lanes <= 2) {
			if (pipe_config->fdi_lanes > 2) {
				DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
					      pipe_name(pipe), pipe_config->fdi_lanes);
				return false;
			}
		} else {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return false;
		}
		return true;
	default:
		BUG();
	}
}

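/*
 * Worked example of the units used in ironlake_fdi_compute_config() below
 * (the concrete return value is an assumption, for illustration only): if
 * intel_fdi_link_freq() returned 27, i.e. a ~2.7 GHz bit clock expressed as a
 * divider into 100 MHz, then link_bw = 27 * 100000000 / 1000 / 10 = 270000,
 * in the same 1 kHz units as the pixel clock, the /10 accounting for the ten
 * bits carried per output octet.
 */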
#define RETRY 1
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				      struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
	int lane, link_bw, fdi_dotclock;
	bool setup_ok, needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n);

	setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
					    intel_crtc->pipe, pipe_config);
	if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return setup_ok ? 0 : -EINVAL;
}

static void hsw_compute_ips_config(struct intel_crtc *crtc,
				   struct intel_crtc_config *pipe_config)
{
	pipe_config->ips_enabled = i915.enable_ips &&
				   hsw_crtc_supports_ips(crtc) &&
				   pipe_config->pipe_bpp <= 24;
}

static int intel_crtc_compute_config(struct intel_crtc *crtc,
5200
static int intel_crtc_compute_config(struct intel_crtc *crtc,
5191
				     struct intel_crtc_config *pipe_config)
5201
				     struct intel_crtc_config *pipe_config)
5192
{
5202
{
5193
	struct drm_device *dev = crtc->base.dev;
5203
	struct drm_device *dev = crtc->base.dev;
5194
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
5204
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
5195
 
5205
 
5196
	/* FIXME should check pixel clock limits on all platforms */
5206
	/* FIXME should check pixel clock limits on all platforms */
5197
	if (INTEL_INFO(dev)->gen < 4) {
5207
	if (INTEL_INFO(dev)->gen < 4) {
5198
		struct drm_i915_private *dev_priv = dev->dev_private;
5208
		struct drm_i915_private *dev_priv = dev->dev_private;
5199
		int clock_limit =
5209
		int clock_limit =
5200
			dev_priv->display.get_display_clock_speed(dev);
5210
			dev_priv->display.get_display_clock_speed(dev);
5201
 
5211
 
5202
		/*
5212
		/*
5203
		 * Enable pixel doubling when the dot clock
5213
		 * Enable pixel doubling when the dot clock
5204
		 * is > 90% of the (display) core speed.
5214
		 * is > 90% of the (display) core speed.
5205
		 *
5215
		 *
5206
		 * GDG double wide on either pipe,
5216
		 * GDG double wide on either pipe,
5207
		 * otherwise pipe A only.
5217
		 * otherwise pipe A only.
5208
		 */
5218
		 */
5209
		if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
5219
		if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
5210
		    adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
5220
		    adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
5211
			clock_limit *= 2;
5221
			clock_limit *= 2;
5212
			pipe_config->double_wide = true;
5222
			pipe_config->double_wide = true;
5213
		}
5223
		}
5214
 
5224
 
5215
		if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
5225
		if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
5216
			return -EINVAL;
5226
			return -EINVAL;
5217
	}
5227
	}
5218
 
5228
 
5219
	/*
5229
	/*
5220
	 * Pipe horizontal size must be even in:
5230
	 * Pipe horizontal size must be even in:
5221
	 * - DVO ganged mode
5231
	 * - DVO ganged mode
5222
	 * - LVDS dual channel mode
5232
	 * - LVDS dual channel mode
5223
	 * - Double wide pipe
5233
	 * - Double wide pipe
5224
	 */
5234
	 */
5225
	if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
5235
	if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
5226
	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
5236
	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
5227
		pipe_config->pipe_src_w &= ~1;
5237
		pipe_config->pipe_src_w &= ~1;
5228
 
5238
 
5229
	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
5239
	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
5230
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
5240
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
5231
	 */
5241
	 */
5232
	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
5242
	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
5233
		adjusted_mode->hsync_start == adjusted_mode->hdisplay)
5243
		adjusted_mode->hsync_start == adjusted_mode->hdisplay)
5234
		return -EINVAL;
5244
		return -EINVAL;
5235
 
5245
 
5236
	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
5246
	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
5237
		pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
5247
		pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
5238
	} else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
5248
	} else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
5239
		/* only a 8bpc pipe, with 6bpc dither through the panel fitter
5249
		/* only a 8bpc pipe, with 6bpc dither through the panel fitter
5240
		 * for lvds. */
5250
		 * for lvds. */
5241
		pipe_config->pipe_bpp = 8*3;
5251
		pipe_config->pipe_bpp = 8*3;
5242
	}
5252
	}
5243
 
5253
 
5244
	if (HAS_IPS(dev))
5254
	if (HAS_IPS(dev))
5245
		hsw_compute_ips_config(crtc, pipe_config);
5255
		hsw_compute_ips_config(crtc, pipe_config);
5246
 
5256
 
5247
	/*
5257
	/*
5248
	 * XXX: PCH/WRPLL clock sharing is done in ->mode_set, so make sure the
5258
	 * XXX: PCH/WRPLL clock sharing is done in ->mode_set, so make sure the
5249
	 * old clock survives for now.
5259
	 * old clock survives for now.
5250
	 */
5260
	 */
5251
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev) || HAS_DDI(dev))
5261
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev) || HAS_DDI(dev))
5252
		pipe_config->shared_dpll = crtc->config.shared_dpll;
5262
		pipe_config->shared_dpll = crtc->config.shared_dpll;
5253
 
5263
 
5254
	if (pipe_config->has_pch_encoder)
5264
	if (pipe_config->has_pch_encoder)
5255
		return ironlake_fdi_compute_config(crtc, pipe_config);
5265
		return ironlake_fdi_compute_config(crtc, pipe_config);
5256
 
5266
 
5257
	return 0;
5267
	return 0;
5258
}
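
/*
 * Worked example for the double-wide check in intel_crtc_compute_config()
 * above (illustrative numbers): with a 200000 kHz core display clock the
 * 90% threshold is 180000 kHz, so a 200000 kHz dot clock on pipe A enables
 * pixel doubling and doubles the limit to 400000 kHz; a dot clock that is
 * still above 90% of the doubled limit is rejected with -EINVAL.
 */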

static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int vco = valleyview_get_vco(dev_priv);
	u32 val;
	int divider;

	mutex_lock(&dev_priv->dpio_lock);
	val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
	mutex_unlock(&dev_priv->dpio_lock);

	divider = val & DISPLAY_FREQUENCY_VALUES;

	WARN((val & DISPLAY_FREQUENCY_STATUS) !=
	     (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
	     "cdclk change in progress\n");

	return DIV_ROUND_CLOSEST(vco << 1, divider + 1);
}
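
/*
 * Illustrative numbers for the formula above: with the CCK divider field
 * read as D, cdclk = (2 * vco) / (D + 1). Assuming a 1600000 kHz VCO and
 * D = 7, this yields 2 * 1600000 / 8 = 400000 kHz.
 */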

static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}

static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333000;
}

static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}

static int pnv_get_display_clock_speed(struct drm_device *dev)
{
	u16 gcfgc = 0;

	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
		return 267000;
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
		return 333000;
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
		return 444000;
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
		return 200000;
	default:
		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
		return 133000;
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
		return 167000;
	}
}

static int i915gm_get_display_clock_speed(struct drm_device *dev)
{
	u16 gcfgc = 0;

	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
		return 133000;
	else {
		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
		case GC_DISPLAY_CLOCK_333_MHZ:
			return 333000;
		default:
		case GC_DISPLAY_CLOCK_190_200_MHZ:
			return 190000;
		}
	}
}

static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266000;
}

static int i855_get_display_clock_speed(struct drm_device *dev)
{
	u16 hpllcc = 0;
	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133000;
	}

	/* Shouldn't happen */
	return 0;
}

static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133000;
}

static void
intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
{
	while (*num > DATA_LINK_M_N_MASK ||
	       *den > DATA_LINK_M_N_MASK) {
		*num >>= 1;
		*den >>= 1;
	}
}

static void compute_m_n(unsigned int m, unsigned int n,
			uint32_t *ret_m, uint32_t *ret_n)
{
	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
	*ret_m = div_u64((uint64_t) m * *ret_n, n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}

void
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n)
{
	m_n->tu = 64;

	compute_m_n(bits_per_pixel * pixel_clock,
		    link_clock * nlanes * 8,
		    &m_n->gmch_m, &m_n->gmch_n);

	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n);
}
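
/*
 * Worked example for the M/N computation above (illustrative): for 24 bpp
 * over 4 lanes at a 148500 kHz pixel clock and a 270000 kHz link clock,
 * the data (gmch) M/N pair encodes the ratio
 * 24 * 148500 : 270000 * 4 * 8 = 3564000 : 8640000 = 0.4125, and the link
 * M/N pair encodes 148500 : 270000 = 0.55. compute_m_n() picks N, derives
 * M from the ratio, and intel_reduce_m_n_ratio() shifts both right until
 * they fit the 24-bit register fields.
 */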

static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
	if (i915.panel_use_ssc >= 0)
		return i915.panel_use_ssc != 0;
	return dev_priv->vbt.lvds_use_ssc
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}

static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int refclk;

	if (IS_VALLEYVIEW(dev)) {
		refclk = 100000;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		refclk = dev_priv->vbt.lvds_ssc_freq;
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
	} else if (!IS_GEN2(dev)) {
		refclk = 96000;
	} else {
		refclk = 48000;
	}

	return refclk;
}

static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
{
	return (1 << dpll->n) << 16 | dpll->m2;
}

static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
{
	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
}
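
/*
 * Illustrative packing example for the two helpers above: with n = 2,
 * m1 = 10 and m2 = 8, i9xx_dpll_compute_fp() returns
 * 2 << 16 | 10 << 8 | 8 = 0x00020a08, while the Pineview variant packs
 * (1 << n) << 16 | m2 = 4 << 16 | 8 = 0x00040008 for the same n and m2.
 */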

static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
				     intel_clock_t *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	u32 fp, fp2 = 0;

	if (IS_PINEVIEW(dev)) {
		fp = pnv_dpll_compute_fp(&crtc->config.dpll);
		if (reduced_clock)
			fp2 = pnv_dpll_compute_fp(reduced_clock);
	} else {
		fp = i9xx_dpll_compute_fp(&crtc->config.dpll);
		if (reduced_clock)
			fp2 = i9xx_dpll_compute_fp(reduced_clock);
	}

	crtc->config.dpll_hw_state.fp0 = fp;

	crtc->lowfreq_avail = false;
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
	    reduced_clock && i915.powersave) {
		crtc->config.dpll_hw_state.fp1 = fp2;
		crtc->lowfreq_avail = true;
	} else {
		crtc->config.dpll_hw_state.fp1 = fp;
	}
}

static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x8cffffff;
	reg_val = 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}

static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}

static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	enum transcoder transcoder = crtc->config.cpu_transcoder;

	if (INTEL_INFO(dev)->gen >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
	} else {
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}

static void intel_dp_set_m_n(struct intel_crtc *crtc)
{
	if (crtc->config.has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
}

static void vlv_update_pll(struct intel_crtc *crtc)
{
	u32 dpll, dpll_md;

	/*
	 * Enable DPIO clock input. We should never disable the reference
	 * clock for pipe B, since VGA hotplug / manual detection depends
	 * on it.
	 */
	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
	/* We should never disable this, set it here for state tracking */
	if (crtc->pipe == PIPE_B)
		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
	dpll |= DPLL_VCO_ENABLE;
	crtc->config.dpll_hw_state.dpll = dpll;

	dpll_md = (crtc->config.pixel_multiplier - 1)
		<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
	crtc->config.dpll_hw_state.dpll_md = dpll_md;
}

static void vlv_prepare_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	mutex_lock(&dev_priv->dpio_lock);

	bestn = crtc->config.dpll.n;
	bestm1 = crtc->config.dpll.m1;
	bestm2 = crtc->config.dpll.m2;
	bestp1 = crtc->config.dpll.p1;
	bestp2 = crtc->config.dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (crtc->config.port_clock == 162000 ||
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
	mutex_unlock(&dev_priv->dpio_lock);
}

static void chv_update_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	int dpll_reg = DPLL(crtc->pipe);
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, intcoeff;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	int refclk;

	crtc->config.dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
		DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
		DPLL_VCO_ENABLE;
	if (pipe != PIPE_A)
		crtc->config.dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;

	crtc->config.dpll_hw_state.dpll_md =
		(crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;

	bestn = crtc->config.dpll.n;
	bestm2_frac = crtc->config.dpll.m2 & 0x3fffff;
	bestm1 = crtc->config.dpll.m1;
	bestm2 = crtc->config.dpll.m2 >> 22;
	bestp1 = crtc->config.dpll.p1;
	bestp2 = crtc->config.dpll.p2;

	/*
	 * Enable Refclk and SSC
	 */
	I915_WRITE(dpll_reg,
		   crtc->config.dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	mutex_lock(&dev_priv->dpio_lock);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port),
		       DPIO_CHV_FRAC_DIV_EN |
		       (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT));

	/* Loop filter */
	refclk = i9xx_get_refclk(&crtc->base, 0);
	loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT |
		2 << DPIO_CHV_GAIN_CTRL_SHIFT;
	if (refclk == 100000)
		intcoeff = 11;
	else if (refclk == 38400)
		intcoeff = 10;
	else
		intcoeff = 9;
	loopfilter |= intcoeff << DPIO_CHV_INT_COEFF_SHIFT;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	mutex_unlock(&dev_priv->dpio_lock);
}
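
/*
 * Note on the m2 split in chv_update_pll() above (illustrative reading of
 * the shifts/masks): the stored m2 is treated as a fixed-point value with
 * 22 fractional bits, so bestm2 = m2 >> 22 is the integer part and
 * bestm2_frac = m2 & 0x3fffff the fraction; e.g. m2 = (50 << 22) | (1 << 20)
 * encodes 50 + 2^20 / 2^22 = 50.25.
 */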

static void i9xx_update_pll(struct intel_crtc *crtc,
			    intel_clock_t *reduced_clock,
			    int num_connectors)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll;
	bool is_sdvo;
	struct dpll *clock = &crtc->config.dpll;

	i9xx_update_pll_dividers(crtc, reduced_clock);

	is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
		intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		dpll |= (crtc->config.pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (IS_G4X(dev) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_INFO(dev)->gen >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	if (crtc->config.sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc->config.dpll_hw_state.dpll = dpll;

	if (INTEL_INFO(dev)->gen >= 4) {
		u32 dpll_md = (crtc->config.pixel_multiplier - 1)
					<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc->config.dpll_hw_state.dpll_md = dpll_md;
	}
}
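
/*
 * Worked example for the P1/P2 encoding in i9xx_update_pll() above
 * (illustrative): for p1 = 3 the code sets bit (p1 - 1), i.e.
 * 1 << 2 = 0x4, shifted into place by DPLL_FPA01_P1_POST_DIV_SHIFT, and a
 * p2 of 10 selects DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 in the switch statement.
 */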

static void i8xx_update_pll(struct intel_crtc *crtc,
			    intel_clock_t *reduced_clock,
			    int num_connectors)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll;
	struct dpll *clock = &crtc->config.dpll;

	i9xx_update_pll_dividers(crtc, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc->config.dpll_hw_state.dpll = dpll;
}

static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
	struct drm_display_mode *adjusted_mode =
		&intel_crtc->config.adjusted_mode;
	uint32_t crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to change the adjusted mode, otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (INTEL_INFO(dev)->gen > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((intel_crtc->config.pipe_src_w - 1) << 16) |
		   (intel_crtc->config.pipe_src_h - 1));
}
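
/*
 * Illustrative example of the timing register packing above: for a
 * 1920x1080 mode with an htotal of 2200, HTOTAL is written as
 * (1920 - 1) | ((2200 - 1) << 16) = 0x077f | 0x08970000 = 0x0897077f,
 * i.e. active size minus one in the low half and total size minus one in
 * the high half; the blank, sync and vertical registers use the same
 * layout.
 */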

static void intel_get_pipe_timings(struct intel_crtc *crtc,
				   struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	uint32_t tmp;

	tmp = I915_READ(HTOTAL(cpu_transcoder));
	pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HBLANK(cpu_transcoder));
	pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
	pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HSYNC(cpu_transcoder));
	pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = I915_READ(VTOTAL(cpu_transcoder));
	pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VBLANK(cpu_transcoder));
	pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
	pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VSYNC(cpu_transcoder));
	pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		pipe_config->adjusted_mode.crtc_vtotal += 1;
		pipe_config->adjusted_mode.crtc_vblank_end += 1;
	}

	tmp = I915_READ(PIPESRC(crtc->pipe));
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;

	pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h;
	pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
}

void intel_mode_from_pipe_config(struct drm_display_mode *mode,
					     struct intel_crtc_config *pipe_config)
{
	mode->hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
	mode->htotal = pipe_config->adjusted_mode.crtc_htotal;
	mode->hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
	mode->hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;

	mode->vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
	mode->vtotal = pipe_config->adjusted_mode.crtc_vtotal;
	mode->vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
	mode->vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;

	mode->flags = pipe_config->adjusted_mode.flags;

	mode->clock = pipe_config->adjusted_mode.crtc_clock;
	mode->flags |= pipe_config->adjusted_mode.flags;
}
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t pipeconf;

	pipeconf = 0;

	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
	    I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
		pipeconf |= PIPECONF_ENABLE;

	if (intel_crtc->config.double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (intel_crtc->config.pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (HAS_PIPE_CXSR(dev)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
		}
	}

	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		if (INTEL_INFO(dev)->gen < 4 ||
		    intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;

	if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(intel_crtc->pipe));
}

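/*
 * The divisors found below satisfy the clock equation quoted in the
 * comment inside this function:
 *   dot = refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 * As a purely illustrative example (not values read from hardware):
 * with a 96000 kHz refclk and n = 3, m1 = 10, m2 = 8, p1 = 2, p2 = 4,
 *   dot = 96000 * (5 * 12 + 10) / 5 / 2 / 4 = 168000 kHz.
 */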
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
			      int x, int y,
			      struct drm_framebuffer *fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int refclk, num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	bool ok, has_reduced_clock = false;
	bool is_lvds = false, is_dsi = false;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_DSI:
			is_dsi = true;
			break;
		}

		num_connectors++;
	}

	if (is_dsi)
		return 0;

	if (!intel_crtc->config.clock_set) {
		refclk = i9xx_get_refclk(crtc, num_connectors);

		/*
		 * Returns a set of divisors for the desired target clock with
		 * the given refclk, or FALSE.  The returned values represent
		 * the clock equation: refclk * (5 * (m1 + 2) + (m2 + 2)) / (n +
		 * 2) / p1 / p2.
		 */
		limit = intel_limit(crtc, refclk);
		ok = dev_priv->display.find_dpll(limit, crtc,
						 intel_crtc->config.port_clock,
						 refclk, NULL, &clock);
		if (!ok) {
			DRM_ERROR("Couldn't find PLL settings for mode!\n");
			return -EINVAL;
		}

		if (is_lvds && dev_priv->lvds_downclock_avail) {
			/*
			 * Ensure we match the reduced clock's P to the target
			 * clock.  If the clocks don't match, we can't switch
			 * the display clock by using the FP0/FP1. In such case
			 * we will disable the LVDS downclock feature.
			 */
			has_reduced_clock =
				dev_priv->display.find_dpll(limit, crtc,
							    dev_priv->lvds_downclock,
							    refclk, &clock,
							    &reduced_clock);
		}
		/* Compat-code for transition, will disappear. */
		intel_crtc->config.dpll.n = clock.n;
		intel_crtc->config.dpll.m1 = clock.m1;
		intel_crtc->config.dpll.m2 = clock.m2;
		intel_crtc->config.dpll.p1 = clock.p1;
		intel_crtc->config.dpll.p2 = clock.p2;
	}

	if (IS_GEN2(dev)) {
		i8xx_update_pll(intel_crtc,
				has_reduced_clock ? &reduced_clock : NULL,
				num_connectors);
	} else if (IS_CHERRYVIEW(dev)) {
		chv_update_pll(intel_crtc);
	} else if (IS_VALLEYVIEW(dev)) {
		vlv_update_pll(intel_crtc);
	} else {
		i9xx_update_pll(intel_crtc,
				has_reduced_clock ? &reduced_clock : NULL,
				num_connectors);
	}

	return 0;
}

static void i9xx_get_pfit_config(struct intel_crtc *crtc,
				 struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
		return;

	tmp = I915_READ(PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (INTEL_INFO(dev)->gen < 4) {
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	pipe_config->gmch_pfit.control = tmp;
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
	if (INTEL_INFO(dev)->gen < 5)
		pipe_config->gmch_pfit.lvds_border_bits =
			I915_READ(LVDS) & LVDS_BORDER_ENABLE;
}

static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	intel_clock_t clock;
	u32 mdiv;
	int refclk = 100000;

	/* In case of MIPI, the DPLL will not even be used */
	if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
		return;

	mutex_lock(&dev_priv->dpio_lock);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	mutex_unlock(&dev_priv->dpio_lock);

	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	vlv_clock(refclk, &clock);

	/* clock.dot is the fast clock */
	pipe_config->port_clock = clock.dot / 5;
}

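/*
 * i9xx_get_plane_config() reads the primary plane state back from the
 * hardware (DSPCNTR, DSPSURF/DSPADDR, PIPESRC and DSPSTRIDE) and fills
 * in a struct intel_plane_config with the framebuffer format, size,
 * tiling, base address and pitch currently programmed.
 */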
static void i9xx_get_plane_config(struct intel_crtc *crtc,
				  struct intel_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset;
	int pipe = crtc->pipe, plane = crtc->plane;
	int fourcc, pixel_format;
	int aligned_height;

	crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
	if (!crtc->base.primary->fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	val = I915_READ(DSPCNTR(plane));

	if (INTEL_INFO(dev)->gen >= 4)
		if (val & DISPPLANE_TILED)
			plane_config->tiled = true;

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = intel_format_to_fourcc(pixel_format);
	crtc->base.primary->fb->pixel_format = fourcc;
	crtc->base.primary->fb->bits_per_pixel =
		drm_format_plane_cpp(fourcc, 0) * 8;

	if (INTEL_INFO(dev)->gen >= 4) {
		if (plane_config->tiled)
			offset = I915_READ(DSPTILEOFF(plane));
		else
			offset = I915_READ(DSPLINOFF(plane));
		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(plane));
	}
	plane_config->base = base;

	val = I915_READ(PIPESRC(pipe));
	crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
	crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	crtc->base.primary->fb->pitches[0] = val & 0xffffff80;

	aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
					    plane_config->tiled);

	plane_config->size = 16*1024*1024;

	DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe, plane, crtc->base.primary->fb->width,
		      crtc->base.primary->fb->height,
		      crtc->base.primary->fb->bits_per_pixel, base,
		      crtc->base.primary->fb->pitches[0],
		      plane_config->size);
}

static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	intel_clock_t clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2;
	int refclk = 100000;

	mutex_lock(&dev_priv->dpio_lock);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	mutex_unlock(&dev_priv->dpio_lock);

	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff);
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	chv_clock(refclk, &clock);

	/* clock.dot is the fast clock */
	pipe_config->port_clock = clock.dot / 5;
}

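/*
 * i9xx_get_pipe_config() is the read-out path: it recovers bpc, color
 * range, double-wide mode, pipe timings, panel fitter state, pixel
 * multiplier and the DPLL/FP register state from the hardware,
 * returning false if the pipe's power domain or the pipe itself is
 * disabled.
 */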
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	if (!intel_display_power_enabled(dev_priv,
					 POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	if (INTEL_INFO(dev)->gen < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	if (INTEL_INFO(dev)->gen >= 4) {
		tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev)) {
		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	return true;
}

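/*
 * PCH reference clock setup: decide what the final PCH_DREF_CONTROL
 * value should look like (CK505 vs. internal nonspread source, SSC on
 * or off, CPU eDP source selection), then walk the hardware there one
 * write at a time with a posting read and a 200us delay between steps,
 * since the reference sources must be enabled and disabled slowly and
 * in order.
 */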
static void ironlake_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;

	/* We need to take the global config into account */
	list_for_each_entry(encoder, &mode_config->encoder_list,
			    base.head) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
				has_cpu_edp = true;
			break;
		}
	}

	if (HAS_PCH_IBX(dev)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
		      has_panel, has_lvds, has_ck505);

	/* Ironlake: try to set up the display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else {
		final |= DREF_SSC_SOURCE_DISABLE;
		final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	}

	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling SSC entirely\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		/* Turn off the SSC source */
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_DISABLE;

		/* Turn off SSC1 */
		val &= ~DREF_SSC1_ENABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	}

	BUG_ON(val != final);
}

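/*
 * The two helpers below program the FDI mPHY on LPT through the
 * sideband interface: lpt_reset_fdi_mphy() pulses the IOSFSB reset bit
 * in SOUTH_CHICKEN2 and waits for the matching status bit, while
 * lpt_program_fdi_mphy() applies the WaMPhyProgramming:hsw register
 * values with read-modify-write cycles over intel_sbi_read()/
 * intel_sbi_write().
 */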
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
			       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
				FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}

/* WaMPhyProgramming:hsw */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}

/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
				 bool with_fdi)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg, tmp;

	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
		 with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->dpio_lock);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
	       SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->dpio_lock);
}

/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg, tmp;

	mutex_lock(&dev_priv->dpio_lock);

	reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
	       SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->dpio_lock);
}

static void lpt_init_pch_refclk(struct drm_device *dev)
{
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	bool has_vga = false;

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		switch (encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			has_vga = true;
			break;
		}
	}

	if (has_vga)
		lpt_enable_clkout_dp(dev, true, true);
	else
		lpt_disable_clkout_dp(dev);
}

/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_device *dev)
{
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		ironlake_init_pch_refclk(dev);
	else if (HAS_PCH_LPT(dev))
		lpt_init_pch_refclk(dev);
}

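/*
 * ironlake_get_refclk(): a single LVDS output that wants spread
 * spectrum gets the SSC reference frequency from the VBT; every other
 * configuration uses the fixed 120000 kHz PCH reference.
 */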
static int ironlake_get_refclk(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	int num_connectors = 0;
	bool is_lvds = false;

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		}
		num_connectors++;
	}

	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
			      dev_priv->vbt.lvds_ssc_freq);
		return dev_priv->vbt.lvds_ssc_freq;
	}

	return 120000;
}

static void ironlake_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t val;

	val = 0;

	switch (intel_crtc->config.pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (intel_crtc->config.dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	if (intel_crtc->config.limited_color_range)
		val |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(pipe), val);
	POSTING_READ(PIPECONF(pipe));
}

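/*
 * For reference, the limited range coefficient computed below evaluates
 * to ((235 - 16) * (1 << 12) / 255) & 0xff8 = 0xdb8, i.e. a gain of
 * roughly 219/255, and the gen7+ post offset to
 * (16 * (1 << 12) / 255) & 0x1fff = 0x101.
 */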
/*
 * Set up the pipe CSC unit.
 *
 * Currently only full range RGB to limited range RGB conversion
 * is supported, but eventually this should handle various
 * RGB<->YCbCr scenarios as well.
 */
static void intel_set_pipe_csc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint16_t coeff = 0x7800; /* 1.0 */

	/*
	 * TODO: Check what kind of values actually come out of the pipe
	 * with these coeff/postoff values and adjust to get the best
	 * accuracy. Perhaps we even need to take the bpc value into
	 * consideration.
	 */

	if (intel_crtc->config.limited_color_range)
		coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */

	/*
	 * GY/GU and RY/RU should be the other way around according
	 * to BSpec, but reality doesn't agree. Just set them up in
	 * a way that results in the correct picture.
	 */
	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);

	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);

	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);

	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);

	if (INTEL_INFO(dev)->gen > 6) {
		uint16_t postoff = 0;

		if (intel_crtc->config.limited_color_range)
			postoff = (16 * (1 << 12) / 255) & 0x1fff;

		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);

		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
	} else {
		uint32_t mode = CSC_MODE_YUV_TO_RGB;

		if (intel_crtc->config.limited_color_range)
			mode |= CSC_BLACK_SCREEN_OFFSET;

		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
	}
}

static void haswell_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
	uint32_t val;

	val = 0;

	if (IS_HASWELL(dev) && intel_crtc->config.dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	I915_WRITE(PIPECONF(cpu_transcoder), val);
	POSTING_READ(PIPECONF(cpu_transcoder));

	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
	POSTING_READ(GAMMA_MODE(intel_crtc->pipe));

	if (IS_BROADWELL(dev)) {
		val = 0;

		switch (intel_crtc->config.pipe_bpp) {
		case 18:
			val |= PIPEMISC_DITHER_6_BPC;
			break;
		case 24:
			val |= PIPEMISC_DITHER_8_BPC;
			break;
		case 30:
			val |= PIPEMISC_DITHER_10_BPC;
			break;
		case 36:
			val |= PIPEMISC_DITHER_12_BPC;
			break;
		default:
			/* Case prevented by pipe_config_set_bpp. */
			BUG();
		}

		if (intel_crtc->config.dither)
			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

		I915_WRITE(PIPEMISC(pipe), val);
	}
}

static bool ironlake_compute_clocks(struct drm_crtc *crtc,
				    intel_clock_t *clock,
				    bool *has_reduced_clock,
				    intel_clock_t *reduced_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	int refclk;
	const intel_limit_t *limit;
	bool ret, is_lvds = false;

	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		switch (intel_encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		}
	}

	refclk = ironlake_get_refclk(crtc);

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or FALSE.  The returned values represent the clock equation:
	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc, refclk);
	ret = dev_priv->display.find_dpll(limit, crtc,
					  to_intel_crtc(crtc)->config.port_clock,
					  refclk, NULL, clock);
	if (!ret)
		return false;

	if (is_lvds && dev_priv->lvds_downclock_avail) {
		/*
		 * Ensure we match the reduced clock's P to the target clock.
		 * If the clocks don't match, we can't switch the display clock
		 * by using the FP0/FP1. In such case we will disable the LVDS
		 * downclock feature.
		 */
		*has_reduced_clock =
			dev_priv->display.find_dpll(limit, crtc,
						    dev_priv->lvds_downclock,
						    refclk, clock,
						    reduced_clock);
	}

	return true;
}

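/*
 * Illustrative plug-in of the divisor equation documented in
 * ironlake_compute_clocks() above (made-up numbers, not a real mode):
 * with refclk = 120000 kHz, m1 = 12, m2 = 9, n = 3, p1 = 2, p2 = 10
 * the dot clock works out to
 *   120000 * (5 * (12 + 2) + (9 + 2)) / (3 + 2) / 2 / 10
 * = 120000 * 81 / 5 / 2 / 10 = 97200 kHz.
 */
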
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	u32 bps = target_clock * bpp * 21 / 20;
	return DIV_ROUND_UP(bps, link_bw * 8);
}

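/*
 * Rough worked example for ironlake_get_lanes_required() (illustrative
 * numbers only): a 270000 kHz stream at 24 bpp gives
 * bps = 270000 * 24 * 21 / 20 = 6804000, and a 270000 kHz link clock is
 * treated as carrying link_bw * 8 = 2160000 kbit/s per lane, so
 * DIV_ROUND_UP(6804000, 2160000) = 4 lanes.
 */
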
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}

static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				      u32 *fp,
				      intel_clock_t *reduced_clock, u32 *fp2)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	uint32_t dpll;
	int factor, num_connectors = 0;
	bool is_lvds = false, is_sdvo = false;

	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		switch (intel_encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			break;
		}

		num_connectors++;
	}

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (is_lvds) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (intel_crtc->config.sdvo_tv_clock)
		factor = 20;

	if (ironlake_needs_fb_cb_tune(&intel_crtc->config.dpll, factor))
		*fp |= FP_CB_TUNE;

	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
		*fp2 |= FP_CB_TUNE;

	dpll = 0;

	if (is_lvds)
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (intel_crtc->config.pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;
	if (intel_crtc->config.has_dp_encoder)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (intel_crtc->config.dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	return dpll | DPLL_VCO_ENABLE;
}

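/*
 * Note on the P1 bitmask computed in ironlake_compute_dpll() above
 * (illustrative): the post divider is one-hot encoded rather than
 * binary-coded, e.g. p1 = 3 yields (1 << (3 - 1)) = 0x4, and the same
 * pattern is written into both the FPA0 and FPA1 fields.
 */
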
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
				  int x, int y,
				  struct drm_framebuffer *fb)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	u32 dpll = 0, fp = 0, fp2 = 0;
	bool ok, has_reduced_clock = false;
	bool is_lvds = false;
	struct intel_encoder *encoder;
	struct intel_shared_dpll *pll;

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		}

		num_connectors++;
	}

	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));

	ok = ironlake_compute_clocks(crtc, &clock,
				     &has_reduced_clock, &reduced_clock);
	if (!ok && !intel_crtc->config.clock_set) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}
	/* Compat-code for transition, will disappear. */
	if (!intel_crtc->config.clock_set) {
		intel_crtc->config.dpll.n = clock.n;
		intel_crtc->config.dpll.m1 = clock.m1;
		intel_crtc->config.dpll.m2 = clock.m2;
		intel_crtc->config.dpll.p1 = clock.p1;
		intel_crtc->config.dpll.p2 = clock.p2;
	}

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (intel_crtc->config.has_pch_encoder) {
		fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
		if (has_reduced_clock)
			fp2 = i9xx_dpll_compute_fp(&reduced_clock);

		dpll = ironlake_compute_dpll(intel_crtc,
					     &fp, &reduced_clock,
					     has_reduced_clock ? &fp2 : NULL);

		intel_crtc->config.dpll_hw_state.dpll = dpll;
		intel_crtc->config.dpll_hw_state.fp0 = fp;
		if (has_reduced_clock)
			intel_crtc->config.dpll_hw_state.fp1 = fp2;
		else
			intel_crtc->config.dpll_hw_state.fp1 = fp;

		pll = intel_get_shared_dpll(intel_crtc);
		if (pll == NULL) {
			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
					 pipe_name(intel_crtc->pipe));
			return -EINVAL;
		}
	} else
		intel_put_shared_dpll(intel_crtc);

	if (is_lvds && has_reduced_clock && i915.powersave)
		intel_crtc->lowfreq_avail = true;
	else
		intel_crtc->lowfreq_avail = false;

	return 0;
}

static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}

static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
					& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
				   & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	} else {
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}

void intel_dp_get_m_n(struct intel_crtc *crtc,
		      struct intel_crtc_config *pipe_config)
{
	if (crtc->config.has_pch_encoder)
		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
	else
		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
					     &pipe_config->dp_m_n);
}

static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_config *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n);
}

static void ironlake_get_pfit_config(struct intel_crtc *crtc,
				     struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	tmp = I915_READ(PF_CTL(crtc->pipe));

	if (tmp & PF_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));

		/* We currently do not free assignments of panel fitters on
		 * ivb/hsw (since we don't use the higher upscaling modes which
		 * differentiate them) so just WARN about this case for now. */
		if (IS_GEN7(dev)) {
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
				PF_PIPE_SEL_IVB(crtc->pipe));
		}
	}
}

static void ironlake_get_plane_config(struct intel_crtc *crtc,
				      struct intel_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset;
	int pipe = crtc->pipe, plane = crtc->plane;
	int fourcc, pixel_format;
	int aligned_height;

	crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
	if (!crtc->base.primary->fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	val = I915_READ(DSPCNTR(plane));

	if (INTEL_INFO(dev)->gen >= 4)
		if (val & DISPPLANE_TILED)
			plane_config->tiled = true;

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = intel_format_to_fourcc(pixel_format);
	crtc->base.primary->fb->pixel_format = fourcc;
	crtc->base.primary->fb->bits_per_pixel =
		drm_format_plane_cpp(fourcc, 0) * 8;

	base = I915_READ(DSPSURF(plane)) & 0xfffff000;
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		offset = I915_READ(DSPOFFSET(plane));
	} else {
		if (plane_config->tiled)
			offset = I915_READ(DSPTILEOFF(plane));
		else
			offset = I915_READ(DSPLINOFF(plane));
	}
	plane_config->base = base;

	val = I915_READ(PIPESRC(pipe));
	crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
	crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	crtc->base.primary->fb->pitches[0] = val & 0xffffff80;

	aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
					    plane_config->tiled);

	plane_config->size = 16*1024*1024;

	DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe, plane, crtc->base.primary->fb->width,
		      crtc->base.primary->fb->height,
		      crtc->base.primary->fb->bits_per_pixel, base,
		      crtc->base.primary->fb->pitches[0],
		      plane_config->size);
}

static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	if (!intel_display_power_enabled(dev_priv,
					 POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;

		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);

		if (HAS_PCH_IBX(dev_priv->dev)) {
			pipe_config->shared_dpll =
				(enum intel_dpll_id) crtc->pipe;
		} else {
			tmp = I915_READ(PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
			else
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
		}

		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];

		WARN_ON(!pll->get_hw_state(dev_priv, pll,
					   &pipe_config->dpll_hw_state));

		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ironlake_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);

	ironlake_get_pfit_config(crtc, pipe_config);

	return true;
}

static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		WARN(crtc->active, "CRTC for pipe %c enabled\n",
		     pipe_name(crtc->pipe));

	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
	WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
	WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
	WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
	     "CPU PWM1 enabled\n");
	if (IS_HASWELL(dev))
		WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
		     "CPU PWM2 enabled\n");
	WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
	     "PCH PWM1 enabled\n");
	WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
	     "Utility pin enabled\n");
	WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}

static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (IS_HASWELL(dev))
		return I915_READ(D_COMP_HSW);
	else
		return I915_READ(D_COMP_BDW);
}

static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
	struct drm_device *dev = dev_priv->dev;

	if (IS_HASWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
					    val))
			DRM_ERROR("Failed to write to D_COMP\n");
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		I915_WRITE(D_COMP_BDW, val);
		POSTING_READ(D_COMP_BDW);
	}
}

/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	uint32_t val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
				       LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		val = I915_READ(LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
		     1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}

/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	uint32_t val;
	unsigned long irqflags;

	val = I915_READ(LCPLL_CTL);

	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not in PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 *
	 * The other problem is that hsw_restore_lcpll() is called as part of
	 * the runtime PM resume sequence, so we can't just call
	 * gen6_gt_force_wake_get() because that function calls
	 * intel_runtime_pm_get(), and we can't change the runtime PM refcount
	 * while we are in the resume sequence. So to solve this problem we have
	 * to call special forcewake code that doesn't touch runtime PM and
	 * doesn't enable the forcewake delayed work.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (dev_priv->uncore.forcewake_count++ == 0)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
		DRM_ERROR("LCPLL not locked yet\n");

	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
					LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	/* See the big comment above. */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (--dev_priv->uncore.forcewake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
 * hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" in the hardware
 * documentation.
 */
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	DRM_DEBUG_KMS("Enabling package C8+\n");

	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev);
	hsw_disable_lcpll(dev_priv, true, true);
}

void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	DRM_DEBUG_KMS("Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	lpt_init_pch_refclk(dev);

	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	intel_prepare_ddi(dev);
}

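/*
 * Expected pairing (a sketch of the call order, assuming the usual
 * runtime PM flow in this driver rather than a definition of it):
 * hsw_enable_pc8() runs on the way into runtime suspend once all outputs
 * are shut down, and hsw_disable_pc8() runs on runtime resume before any
 * display hardware is touched, as the comment block above describes.
 */
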
static void snb_modeset_global_resources(struct drm_device *dev)
{
	modeset_update_crtc_power_domains(dev);
}

static void haswell_modeset_global_resources(struct drm_device *dev)
{
	modeset_update_crtc_power_domains(dev);
}

static int haswell_crtc_mode_set(struct drm_crtc *crtc,
				 int x, int y,
				 struct drm_framebuffer *fb)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!intel_ddi_pll_select(intel_crtc))
		return -EINVAL;

	intel_crtc->lowfreq_avail = false;

	return 0;
}

static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (pipe_config->ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		pipe_config->shared_dpll = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
		break;
	}

	if (pipe_config->shared_dpll >= 0) {
		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];

		WARN_ON(!pll->get_hw_state(dev_priv, pll,
					   &pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only FDI/PCH transcoder A. It is connected to DDI E,
	 * so just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if ((port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}

static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain pfit_domain;
	uint32_t tmp;

	if (!intel_display_power_enabled(dev_priv,
					 POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
		enum pipe trans_edp_pipe;
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to edp transcoder\n");
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_edp_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_edp_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_edp_pipe = PIPE_C;
			break;
		}

		if (trans_edp_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
	}

	if (!intel_display_power_enabled(dev_priv,
			POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	haswell_get_ddi_port_state(crtc, pipe_config);

	intel_get_pipe_timings(crtc, pipe_config);

	pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	if (intel_display_power_enabled(dev_priv, pfit_domain))
		ironlake_get_pfit_config(crtc, pipe_config);

	if (IS_HASWELL(dev))
		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
			(I915_READ(IPS_CTL) & IPS_ENABLE);

	pipe_config->pixel_multiplier = 1;

	return true;
}

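/* HDMI pixel clock (in kHz) to AUD_CONFIG_PIXEL_CLOCK_HDMI_* mapping. */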
static struct {
	int clock;
	u32 config;
} hdmi_audio_clock[] = {
	{ DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
	{ 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
	{ 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
	{ 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
	{ 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
	{ 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
	{ DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
	{ 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
	{ DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
	{ 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
};

/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
		if (mode->clock == hdmi_audio_clock[i].clock)
			break;
	}

	if (i == ARRAY_SIZE(hdmi_audio_clock)) {
		DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
		i = 1;
	}

	DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
		      hdmi_audio_clock[i].clock,
		      hdmi_audio_clock[i].config);

	return hdmi_audio_clock[i].config;
}

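/*
 * Compare the ELD already latched in the audio hardware with the connector's
 * current ELD; returns true when nothing needs to be rewritten.
 */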
static bool intel_eld_uptodate(struct drm_connector *connector,
			       int reg_eldv, uint32_t bits_eldv,
			       int reg_elda, uint32_t bits_elda,
			       int reg_edid)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t i;

	i = I915_READ(reg_eldv);
	i &= bits_eldv;

	if (!eld[0])
		return !i;

	if (!i)
		return false;

	i = I915_READ(reg_elda);
	i &= ~bits_elda;
	I915_WRITE(reg_elda, i);

	for (i = 0; i < eld[2]; i++)
		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
			return false;

	return true;
}

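/* Write the connector's ELD into the G4x audio codec registers. */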
static void g4x_write_eld(struct drm_connector *connector,
			  struct drm_crtc *crtc,
			  struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t len;
	uint32_t i;

	i = I915_READ(G4X_AUD_VID_DID);

	if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
		eldv = G4X_ELDV_DEVCL_DEVBLC;
	else
		eldv = G4X_ELDV_DEVCTG;

	if (intel_eld_uptodate(connector,
			       G4X_AUD_CNTL_ST, eldv,
			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
			       G4X_HDMIW_HDMIEDID))
		return;

	i = I915_READ(G4X_AUD_CNTL_ST);
	i &= ~(eldv | G4X_ELD_ADDR);
	len = (i >> 9) & 0x1f;		/* ELD buffer size */
	I915_WRITE(G4X_AUD_CNTL_ST, i);

	if (!eld[0])
		return;

	len = min_t(uint8_t, eld[2], len);
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));

	i = I915_READ(G4X_AUD_CNTL_ST);
	i |= eldv;
	I915_WRITE(G4X_AUD_CNTL_ST, i);
}

static void haswell_write_eld(struct drm_connector *connector,
			      struct drm_crtc *crtc,
			      struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t i;
	int len;
	int pipe = to_intel_crtc(crtc)->pipe;
	int tmp;

	int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
	int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
	int aud_config = HSW_AUD_CFG(pipe);
	int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;

	/* Audio output enable */
	DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
	tmp = I915_READ(aud_cntrl_st2);
	tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
	I915_WRITE(aud_cntrl_st2, tmp);
	POSTING_READ(aud_cntrl_st2);

	assert_pipe_disabled(dev_priv, to_intel_crtc(crtc)->pipe);

	/* Set ELD valid state */
	tmp = I915_READ(aud_cntrl_st2);
	DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
	tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
	I915_WRITE(aud_cntrl_st2, tmp);
	tmp = I915_READ(aud_cntrl_st2);
	DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);

	/* Enable HDMI mode */
	tmp = I915_READ(aud_config);
	DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
	/* clear N_programing_enable and N_value_index */
	tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
	I915_WRITE(aud_config, tmp);

	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));

	eldv = AUDIO_ELD_VALID_A << (pipe * 4);

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
	} else {
		I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
	}

	if (intel_eld_uptodate(connector,
			       aud_cntrl_st2, eldv,
			       aud_cntl_st, IBX_ELD_ADDRESS,
			       hdmiw_hdmiedid))
		return;

	i = I915_READ(aud_cntrl_st2);
	i &= ~eldv;
	I915_WRITE(aud_cntrl_st2, i);

	if (!eld[0])
		return;

	i = I915_READ(aud_cntl_st);
	i &= ~IBX_ELD_ADDRESS;
	I915_WRITE(aud_cntl_st, i);
	i = (i >> 29) & DIP_PORT_SEL_MASK;		/* DIP_Port_Select, 0x1 = PortB */
	DRM_DEBUG_DRIVER("port num:%d\n", i);

	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));

	i = I915_READ(aud_cntrl_st2);
	i |= eldv;
	I915_WRITE(aud_cntrl_st2, i);
}

static void ironlake_write_eld(struct drm_connector *connector,
			       struct drm_crtc *crtc,
			       struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t i;
	int len;
	int hdmiw_hdmiedid;
	int aud_config;
	int aud_cntl_st;
	int aud_cntrl_st2;
	int pipe = to_intel_crtc(crtc)->pipe;

	if (HAS_PCH_IBX(connector->dev)) {
		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
		aud_config = IBX_AUD_CFG(pipe);
		aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
	} else if (IS_VALLEYVIEW(connector->dev)) {
		hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
		aud_config = VLV_AUD_CFG(pipe);
		aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
		aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
	} else {
		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
		aud_config = CPT_AUD_CFG(pipe);
		aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
	}

	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));

	if (IS_VALLEYVIEW(connector->dev)) {
		struct intel_encoder *intel_encoder;
		struct intel_digital_port *intel_dig_port;

		intel_encoder = intel_attached_encoder(connector);
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		i = intel_dig_port->port;
	} else {
		i = I915_READ(aud_cntl_st);
		i = (i >> 29) & DIP_PORT_SEL_MASK;
		/* DIP_Port_Select, 0x1 = PortB */
	}

	if (!i) {
		DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
		/* operate blindly on all ports */
		eldv = IBX_ELD_VALIDB;
		eldv |= IBX_ELD_VALIDB << 4;
		eldv |= IBX_ELD_VALIDB << 8;
	} else {
		DRM_DEBUG_DRIVER("ELD on port %c\n", port_name(i));
		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
	}

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
	} else {
		I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
	}

	if (intel_eld_uptodate(connector,
			       aud_cntrl_st2, eldv,
			       aud_cntl_st, IBX_ELD_ADDRESS,
			       hdmiw_hdmiedid))
		return;

	i = I915_READ(aud_cntrl_st2);
	i &= ~eldv;
	I915_WRITE(aud_cntrl_st2, i);

	if (!eld[0])
		return;

	i = I915_READ(aud_cntl_st);
	i &= ~IBX_ELD_ADDRESS;
	I915_WRITE(aud_cntl_st, i);

	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));

	i = I915_READ(aud_cntrl_st2);
	i |= eldv;
	I915_WRITE(aud_cntrl_st2, i);
}

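/*
 * Look up the connector driving this encoder, refresh its ELD with the
 * current AV sync delay and hand it to the platform specific write_eld hook.
 */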
void intel_write_eld(struct drm_encoder *encoder,
		     struct drm_display_mode *mode)
{
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_connector *connector;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	connector = drm_select_eld(encoder, mode);
	if (!connector)
		return;

	DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
			 connector->base.id,
			 connector->name,
			 connector->encoder->base.id,
			 connector->encoder->name);

	connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;

	if (dev_priv->display.write_eld)
		dev_priv->display.write_eld(connector, crtc, mode);
}

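/*
 * Cursor plane programming. Each variant below caches the last control value
 * written and only touches the cursor control register when it changes; the
 * base address write is what actually commits the update.
 */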
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t cntl;

	if (base != intel_crtc->cursor_base) {
		/* On these chipsets we can only modify the base whilst
		 * the cursor is disabled.
		 */
		if (intel_crtc->cursor_cntl) {
			I915_WRITE(_CURACNTR, 0);
			POSTING_READ(_CURACNTR);
			intel_crtc->cursor_cntl = 0;
		}

		I915_WRITE(_CURABASE, base);
		POSTING_READ(_CURABASE);
	}

	/* XXX width must be 64, stride 256 => 0x00 << 28 */
	cntl = 0;
	if (base)
		cntl = (CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB);
	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(_CURACNTR, cntl);
		POSTING_READ(_CURACNTR);
		intel_crtc->cursor_cntl = cntl;
	}
}

static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t cntl;

	cntl = 0;
	if (base) {
		cntl = MCURSOR_GAMMA_ENABLE;
		switch (intel_crtc->cursor_width) {
		case 64:
			cntl |= CURSOR_MODE_64_ARGB_AX;
			break;
		case 128:
			cntl |= CURSOR_MODE_128_ARGB_AX;
			break;
		case 256:
			cntl |= CURSOR_MODE_256_ARGB_AX;
			break;
		default:
			WARN_ON(1);
			return;
		}
		cntl |= pipe << 28; /* Connect to correct pipe */
	}
	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(pipe), cntl);
		POSTING_READ(CURCNTR(pipe));
		intel_crtc->cursor_cntl = cntl;
	}

	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
	POSTING_READ(CURBASE(pipe));
}

static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t cntl;

	cntl = 0;
	if (base) {
		cntl = MCURSOR_GAMMA_ENABLE;
		switch (intel_crtc->cursor_width) {
		case 64:
			cntl |= CURSOR_MODE_64_ARGB_AX;
			break;
		case 128:
			cntl |= CURSOR_MODE_128_ARGB_AX;
			break;
		case 256:
			cntl |= CURSOR_MODE_256_ARGB_AX;
			break;
		default:
			WARN_ON(1);
			return;
		}
	}
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		cntl |= CURSOR_PIPE_CSC_ENABLE;

	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(pipe), cntl);
		POSTING_READ(CURCNTR(pipe));
		intel_crtc->cursor_cntl = cntl;
	}

	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
	POSTING_READ(CURBASE(pipe));
}

/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
void intel_crtc_update_cursor(struct drm_crtc *crtc,
			      bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int x = crtc->cursor_x;
	int y = crtc->cursor_y;
	u32 base = 0, pos = 0;

	if (on)
		base = intel_crtc->cursor_addr;

	if (x >= intel_crtc->config.pipe_src_w)
		base = 0;

	if (y >= intel_crtc->config.pipe_src_h)
		base = 0;

	if (x < 0) {
		if (x + intel_crtc->cursor_width <= 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= x << CURSOR_X_SHIFT;

	if (y < 0) {
		if (y + intel_crtc->cursor_height <= 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= y << CURSOR_Y_SHIFT;

	if (base == 0 && intel_crtc->cursor_base == 0)
		return;

	I915_WRITE(CURPOS(pipe), pos);

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
		ivb_update_cursor(crtc, base);
	else if (IS_845G(dev) || IS_I865G(dev))
		i845_update_cursor(crtc, base);
	else
		i9xx_update_cursor(crtc, base);
	intel_crtc->cursor_base = base;
}

/*
 * intel_crtc_cursor_set_obj - Set cursor to specified GEM object
 *
 * Note that the object's reference will be consumed if the update fails.  If
 * the update succeeds, the reference of the old object (if any) will be
 * consumed.
 */
static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
				     struct drm_i915_gem_object *obj,
				     uint32_t width, uint32_t height)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	unsigned old_width;
	uint32_t addr;
	int ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!obj) {
		DRM_DEBUG_KMS("cursor off\n");
		addr = 0;
		obj = NULL;
		mutex_lock(&dev->struct_mutex);
		goto finish;
	}

	/* Check for which cursor types we support */
	if (!((width == 64 && height == 64) ||
	      (width == 128 && height == 128 && !IS_GEN2(dev)) ||
	      (width == 256 && height == 256 && !IS_GEN2(dev)))) {
		DRM_DEBUG("Cursor dimension not supported\n");
		return -EINVAL;
	}

	if (obj->base.size < width * height * 4) {
		DRM_DEBUG_KMS("buffer is too small\n");
		ret = -ENOMEM;
		goto fail;
	}

	/* we only need to pin inside GTT if cursor is non-phy */
	mutex_lock(&dev->struct_mutex);
	if (!INTEL_INFO(dev)->cursor_needs_physical) {
		unsigned alignment;

		if (obj->tiling_mode) {
			DRM_DEBUG_KMS("cursor cannot be tiled\n");
			ret = -EINVAL;
			goto fail_locked;
		}

		/*
		 * Global gtt pte registers are special registers which actually
		 * forward writes to a chunk of system memory. Which means that
		 * there is no risk that the register values disappear as soon
		 * as we call intel_runtime_pm_put(), so it is correct to wrap
		 * only the pin/unpin/fence and not more.
		 */
		intel_runtime_pm_get(dev_priv);

		/* Note that the w/a also requires 2 PTE of padding following
		 * the bo. We currently fill all unused PTE with the shadow
		 * page and so we should always have valid PTE following the
		 * cursor preventing the VT-d warning.
		 */
		alignment = 0;
		if (need_vtd_wa(dev))
			alignment = 64*1024;

		ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
		if (ret) {
			DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n");
			intel_runtime_pm_put(dev_priv);
			goto fail_locked;
		}

		ret = i915_gem_object_put_fence(obj);
		if (ret) {
			DRM_DEBUG_KMS("failed to release fence for cursor");
			intel_runtime_pm_put(dev_priv);
			goto fail_unpin;
		}

		addr = i915_gem_obj_ggtt_offset(obj);

		intel_runtime_pm_put(dev_priv);
	} else {
		int align = IS_I830(dev) ? 16 * 1024 : 256;
//		ret = i915_gem_object_attach_phys(obj, align);
//		if (ret) {
//			DRM_DEBUG_KMS("failed to attach phys object\n");
//			goto fail_locked;
//		}
//		addr = obj->phys_handle->busaddr;
	}

	if (IS_GEN2(dev))
		I915_WRITE(CURSIZE, (height << 12) | width);

 finish:
	if (intel_crtc->cursor_bo) {
		if (!INTEL_INFO(dev)->cursor_needs_physical)
			i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
	}

	i915_gem_track_fb(intel_crtc->cursor_bo, obj,
			  INTEL_FRONTBUFFER_CURSOR(pipe));
	mutex_unlock(&dev->struct_mutex);

	old_width = intel_crtc->cursor_width;

	intel_crtc->cursor_addr = addr;
	intel_crtc->cursor_bo = obj;
	intel_crtc->cursor_width = width;
	intel_crtc->cursor_height = height;

	if (intel_crtc->active) {
		if (old_width != width)
			intel_update_watermarks(crtc);
		intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
	}

	return 0;
fail_unpin:
	i915_gem_object_unpin_from_display_plane(obj);
fail_locked:
	mutex_unlock(&dev->struct_mutex);
fail:
	drm_gem_object_unreference_unlocked(&obj->base);
	return ret;
}

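/* Legacy gamma: keep the top 8 bits of each 16-bit channel and reload the LUT. */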
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				 u16 *blue, uint32_t start, uint32_t size)
{
	int end = (start + size > 256) ? 256 : start + size, i;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	for (i = start; i < end; i++) {
		intel_crtc->lut_r[i] = red[i] >> 8;
		intel_crtc->lut_g[i] = green[i] >> 8;
		intel_crtc->lut_b[i] = blue[i] >> 8;
	}

	intel_crtc_load_lut(crtc);
}

/* VESA 640x480x72Hz mode to set on the pipe */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};

struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
			   struct drm_mode_fb_cmd2 *mode_cmd,
			   struct drm_i915_gem_object *obj)
{
	struct intel_framebuffer *intel_fb;
	int ret;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return ERR_PTR(-ENOMEM);
	}

	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
	if (ret)
		goto err;

	return &intel_fb->base;
err:
	drm_gem_object_unreference_unlocked(&obj->base);
	kfree(intel_fb);

	return ERR_PTR(ret);
}

static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
			 struct drm_mode_fb_cmd2 *mode_cmd,
			 struct drm_i915_gem_object *obj)
{
	struct drm_framebuffer *fb;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);
	fb = __intel_framebuffer_create(dev, mode_cmd, obj);
	mutex_unlock(&dev->struct_mutex);

	return fb;
}

static u32
intel_framebuffer_pitch_for_width(int width, int bpp)
{
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
	return ALIGN(pitch, 64);
}

static u32
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
{
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
	return PAGE_ALIGN(pitch * mode->vdisplay);
}

static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
				  struct drm_display_mode *mode,
				  int depth, int bpp)
{
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };

	obj = i915_gem_alloc_object(dev,
				    intel_framebuffer_size_for_mode(mode, bpp));
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	mode_cmd.width = mode->hdisplay;
	mode_cmd.height = mode->vdisplay;
	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
								bpp);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	return intel_framebuffer_create(dev, &mode_cmd, obj);
}

static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
		   struct drm_display_mode *mode)
{
#ifdef CONFIG_DRM_I915_FBDEV
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

	if (!dev_priv->fbdev)
		return NULL;

	if (!dev_priv->fbdev->fb)
		return NULL;

	obj = dev_priv->fbdev->fb->obj;
	BUG_ON(!obj);

	fb = &dev_priv->fbdev->fb->base;
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
							       fb->bits_per_pixel))
		return NULL;

	if (obj->base.size < mode->vdisplay * fb->pitches[0])
		return NULL;

	return fb;
#else
	return NULL;
#endif
}

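/*
 * Temporarily light up a pipe and encoder so load-based output detection
 * (e.g. for analog connectors) can sample the connector;
 * intel_release_load_detect_pipe() below undoes whatever had to be enabled.
 */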
bool intel_get_load_detect_pipe(struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *fb;
	struct drm_mode_config *config = &dev->mode_config;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

retry:
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
	if (ret)
		goto fail_unlock;

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail_unlock;

		old->dpms_mode = connector->dpms;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (connector->dpms != DRM_MODE_DPMS_ON)
			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);

		return true;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (possible_crtc->enabled)
			continue;
		/* This can occur when applying the pipe A quirk on resume. */
		if (to_intel_crtc(possible_crtc)->new_enabled)
			continue;

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		goto fail_unlock;
	}

	ret = drm_modeset_lock(&crtc->mutex, ctx);
	if (ret)
		goto fail_unlock;
	intel_encoder->new_crtc = to_intel_crtc(crtc);
	to_intel_connector(connector)->new_encoder = intel_encoder;

	intel_crtc = to_intel_crtc(crtc);
	intel_crtc->new_enabled = true;
	intel_crtc->new_config = &intel_crtc->config;
	old->dpms_mode = connector->dpms;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	if (!mode)
		mode = &load_detect_mode;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	fb = mode_fits_in_fbdev(dev, mode);
	if (fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
		old->release_fb = fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		goto fail;
	}

	if (intel_set_mode(crtc, mode, 0, 0, fb)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		goto fail;
	}

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return true;

 fail:
	intel_crtc->new_enabled = crtc->enabled;
	if (intel_crtc->new_enabled)
		intel_crtc->new_config = &intel_crtc->config;
	else
		intel_crtc->new_config = NULL;
fail_unlock:
	if (ret == -EDEADLK) {
		drm_modeset_backoff(ctx);
		goto retry;
	}

	return false;
}

void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = encoder->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	if (old->load_detect_temp) {
		to_intel_connector(connector)->new_encoder = NULL;
		intel_encoder->new_crtc = NULL;
		intel_crtc->new_enabled = false;
		intel_crtc->new_config = NULL;
		intel_set_mode(crtc, NULL, 0, 0, NULL);

		if (old->release_fb) {
			drm_framebuffer_unregister_private(old->release_fb);
			drm_framebuffer_unreference(old->release_fb);
		}

		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON)
		connector->funcs->dpms(connector, old->dpms_mode);
}

static int i9xx_pll_refclk(struct drm_device *dev,
8634
static int i9xx_pll_refclk(struct drm_device *dev,
8612
			   const struct intel_crtc_config *pipe_config)
8635
			   const struct intel_crtc_config *pipe_config)
8613
{
8636
{
8614
	struct drm_i915_private *dev_priv = dev->dev_private;
8637
	struct drm_i915_private *dev_priv = dev->dev_private;
8615
	u32 dpll = pipe_config->dpll_hw_state.dpll;
8638
	u32 dpll = pipe_config->dpll_hw_state.dpll;
8616
 
8639
 
8617
	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
8640
	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
8618
		return dev_priv->vbt.lvds_ssc_freq;
8641
		return dev_priv->vbt.lvds_ssc_freq;
8619
	else if (HAS_PCH_SPLIT(dev))
8642
	else if (HAS_PCH_SPLIT(dev))
8620
		return 120000;
8643
		return 120000;
8621
	else if (!IS_GEN2(dev))
8644
	else if (!IS_GEN2(dev))
8622
		return 96000;
8645
		return 96000;
8623
	else
8646
	else
8624
		return 48000;
8647
		return 48000;
8625
}
8648
}
8626
 
8649
 
8627
/* Returns the clock of the currently programmed mode of the given pipe. */
8650
/* Returns the clock of the currently programmed mode of the given pipe. */
8628
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
8651
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
8629
				struct intel_crtc_config *pipe_config)
8652
				struct intel_crtc_config *pipe_config)
8630
{
8653
{
8631
	struct drm_device *dev = crtc->base.dev;
8654
	struct drm_device *dev = crtc->base.dev;
8632
	struct drm_i915_private *dev_priv = dev->dev_private;
8655
	struct drm_i915_private *dev_priv = dev->dev_private;
8633
	int pipe = pipe_config->cpu_transcoder;
8656
	int pipe = pipe_config->cpu_transcoder;
8634
	u32 dpll = pipe_config->dpll_hw_state.dpll;
8657
	u32 dpll = pipe_config->dpll_hw_state.dpll;
8635
	u32 fp;
8658
	u32 fp;
8636
	intel_clock_t clock;
8659
	intel_clock_t clock;
8637
	int refclk = i9xx_pll_refclk(dev, pipe_config);
8660
	int refclk = i9xx_pll_refclk(dev, pipe_config);
8638
 
8661
 
8639
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
8662
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
8640
		fp = pipe_config->dpll_hw_state.fp0;
8663
		fp = pipe_config->dpll_hw_state.fp0;
8641
	else
8664
	else
8642
		fp = pipe_config->dpll_hw_state.fp1;
8665
		fp = pipe_config->dpll_hw_state.fp1;
8643
 
8666
 
8644
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
8667
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
8645
	if (IS_PINEVIEW(dev)) {
8668
	if (IS_PINEVIEW(dev)) {
8646
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
8669
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
8647
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
8670
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
8648
	} else {
8671
	} else {
8649
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
8672
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
8650
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
8673
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
8651
	}
8674
	}
8652
 
8675
 
8653
	if (!IS_GEN2(dev)) {
8676
	if (!IS_GEN2(dev)) {
8654
		if (IS_PINEVIEW(dev))
8677
		if (IS_PINEVIEW(dev))
8655
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
8678
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
8656
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
8679
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
8657
		else
8680
		else
8658
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
8681
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
8659
			       DPLL_FPA01_P1_POST_DIV_SHIFT);
8682
			       DPLL_FPA01_P1_POST_DIV_SHIFT);
8660
 
8683
 
8661
		switch (dpll & DPLL_MODE_MASK) {
8684
		switch (dpll & DPLL_MODE_MASK) {
8662
		case DPLLB_MODE_DAC_SERIAL:
8685
		case DPLLB_MODE_DAC_SERIAL:
8663
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
8686
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
8664
				5 : 10;
8687
				5 : 10;
8665
			break;
8688
			break;
8666
		case DPLLB_MODE_LVDS:
8689
		case DPLLB_MODE_LVDS:
8667
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
8690
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
8668
				7 : 14;
8691
				7 : 14;
8669
			break;
8692
			break;
8670
		default:
8693
		default:
8671
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
8694
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
8672
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
8695
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
8673
			return;
8696
			return;
8674
		}
8697
		}
8675
 
8698
 
8676
		if (IS_PINEVIEW(dev))
8699
		if (IS_PINEVIEW(dev))
8677
			pineview_clock(refclk, &clock);
8700
			pineview_clock(refclk, &clock);
8678
		else
8701
		else
8679
			i9xx_clock(refclk, &clock);
8702
			i9xx_clock(refclk, &clock);
8680
	} else {
8703
	} else {
8681
		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
8704
		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
8682
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
8705
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
8683
 
8706
 
8684
		if (is_lvds) {
8707
		if (is_lvds) {
8685
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
8708
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
8686
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
8709
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
8687
 
8710
 
8688
			if (lvds & LVDS_CLKB_POWER_UP)
8711
			if (lvds & LVDS_CLKB_POWER_UP)
8689
				clock.p2 = 7;
8712
				clock.p2 = 7;
8690
			else
8713
			else
8691
			clock.p2 = 14;
8714
			clock.p2 = 14;
8692
		} else {
8715
		} else {
8693
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
8716
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
8694
				clock.p1 = 2;
8717
				clock.p1 = 2;
8695
			else {
8718
			else {
8696
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
8719
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
8697
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
8720
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
8698
			}
8721
			}
8699
			if (dpll & PLL_P2_DIVIDE_BY_4)
8722
			if (dpll & PLL_P2_DIVIDE_BY_4)
8700
				clock.p2 = 4;
8723
				clock.p2 = 4;
8701
			else
8724
			else
8702
				clock.p2 = 2;
8725
				clock.p2 = 2;
8703
		}
8726
		}
8704
 
8727
 
8705
		i9xx_clock(refclk, &clock);
8728
		i9xx_clock(refclk, &clock);
8706
	}
8729
	}
8707
 
8730
 
8708
	/*
8731
	/*
8709
	 * This value includes pixel_multiplier. We will use
8732
	 * This value includes pixel_multiplier. We will use
8710
	 * port_clock to compute adjusted_mode.crtc_clock in the
8733
	 * port_clock to compute adjusted_mode.crtc_clock in the
8711
	 * encoder's get_config() function.
8734
	 * encoder's get_config() function.
8712
	 */
8735
	 */
8713
	pipe_config->port_clock = clock.dot;
8736
	pipe_config->port_clock = clock.dot;
8714
}
8737
}
8715
 
8738
 
8716
int intel_dotclock_calculate(int link_freq,
8739
int intel_dotclock_calculate(int link_freq,
8717
			     const struct intel_link_m_n *m_n)
8740
			     const struct intel_link_m_n *m_n)
8718
{
8741
{
8719
	/*
8742
	/*
8720
	 * The calculation for the data clock is:
8743
	 * The calculation for the data clock is:
8721
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
8744
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
8722
	 * But we want to avoid losing precison if possible, so:
8745
	 * But we want to avoid losing precison if possible, so:
8723
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
8746
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
8724
	 *
8747
	 *
8725
	 * and the link clock is simpler:
8748
	 * and the link clock is simpler:
8726
	 * link_clock = (m * link_clock) / n
8749
	 * link_clock = (m * link_clock) / n
8727
	 */
8750
	 */
8728
 
8751
 
8729
	if (!m_n->link_n)
8752
	if (!m_n->link_n)
8730
		return 0;
8753
		return 0;
8731
 
8754
 
8732
	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
8755
	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
8733
}
8756
}
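
/*
 * Illustrative note (not part of the original driver): a worked example of
 * the intel_dotclock_calculate() formula above, using made-up M/N values.
 * Assume link_m = 22, link_n = 32 and link_freq = 270000 (kHz here for
 * concreteness; the result comes back in the same units as link_freq):
 *
 *	pixel_clock = link_m * link_freq / link_n
 *	            = 22 * 270000 / 32
 *	            = 185625 kHz (~185.6 MHz)
 *
 * Dividing m by n first in integer arithmetic would truncate 22/32 to 0,
 * which is exactly the precision loss the comment in the function warns
 * about; widening to u64 before the multiply also avoids overflow.
 */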

static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * This value does not include pixel_multiplier.
	 * We will check that port_clock and adjusted_mode.crtc_clock
	 * agree once we know their relationship in the encoder's
	 * get_config() function.
	 */
	pipe_config->adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
					 &pipe_config->fdi_m_n);
}

/** Returns the currently programmed mode of the given pipe. */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
	struct drm_display_mode *mode;
	struct intel_crtc_config pipe_config;
	int htot = I915_READ(HTOTAL(cpu_transcoder));
	int hsync = I915_READ(HSYNC(cpu_transcoder));
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
	int vsync = I915_READ(VSYNC(cpu_transcoder));
	enum pipe pipe = intel_crtc->pipe;

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	/*
	 * Construct a pipe_config sufficient for getting the clock info
	 * back out of crtc_clock_get.
	 *
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
	 * to use a real value here instead.
	 */
	pipe_config.cpu_transcoder = (enum transcoder) pipe;
	pipe_config.pixel_multiplier = 1;
	pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
	pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
	pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
	i9xx_crtc_clock_get(intel_crtc, &pipe_config);

	mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);

	return mode;
}
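
/*
 * Illustrative note (not part of the original driver): the HTOTAL/VTOTAL
 * style registers read back in intel_crtc_mode_get() above pack two
 * zero-based fields into one dword, active size in the low 16 bits and
 * total size in the high 16 bits. For a hypothetical 1920-wide mode with
 * htotal = 2200, the register would read back as
 *
 *	htot = ((2200 - 1) << 16) | (1920 - 1) = 0x0897077f
 *
 * so (htot & 0xffff) + 1 recovers 1920 and ((htot & 0xffff0000) >> 16) + 1
 * recovers 2200, which is exactly what the function stores in the
 * drm_display_mode it returns.
 */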

static void intel_increase_pllclock(struct drm_device *dev,
				    enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int dpll_reg = DPLL(pipe);
	int dpll;

	if (!HAS_GMCH_DISPLAY(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	dpll = I915_READ(dpll_reg);
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
		DRM_DEBUG_DRIVER("upclocking LVDS\n");

		assert_panel_unlocked(dev_priv, pipe);

		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);

		dpll = I915_READ(dpll_reg);
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
	}
}

static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!HAS_GMCH_DISPLAY(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	/*
	 * Since this is called by a timer, we should never get here in
	 * the manual case.
	 */
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
		int pipe = intel_crtc->pipe;
		int dpll_reg = DPLL(pipe);
		int dpll;

		DRM_DEBUG_DRIVER("downclocking LVDS\n");

		assert_panel_unlocked(dev_priv, pipe);

		dpll = I915_READ(dpll_reg);
		dpll |= DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);
		dpll = I915_READ(dpll_reg);
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
	}

}

void intel_mark_busy(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->mm.busy)
		return;

	intel_runtime_pm_get(dev_priv);
	i915_update_gfx_val(dev_priv);
	dev_priv->mm.busy = true;
}

void intel_mark_idle(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	if (!dev_priv->mm.busy)
		return;

	dev_priv->mm.busy = false;

	if (!i915.powersave)
		goto out;

	for_each_crtc(dev, crtc) {
		if (!crtc->primary->fb)
			continue;

		intel_decrease_pllclock(crtc);
	}

	if (INTEL_INFO(dev)->gen >= 6)
		gen6_rps_idle(dev->dev_private);

out:
	intel_runtime_pm_put(dev_priv);
}


/**
 * intel_mark_fb_busy - mark given planes as busy
 * @dev: DRM device
 * @frontbuffer_bits: bits for the affected planes
 * @ring: optional ring for asynchronous commands
 *
 * This function gets called every time the screen contents change. It can be
 * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
 */
static void intel_mark_fb_busy(struct drm_device *dev,
			       unsigned frontbuffer_bits,
			       struct intel_engine_cs *ring)
{
	enum pipe pipe;

	if (!i915.powersave)
		return;

	for_each_pipe(pipe) {
		if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
			continue;

		intel_increase_pllclock(dev, pipe);
		if (ring && intel_fbc_enabled(dev))
			ring->fbc_dirty = true;
	}
}

/**
 * intel_fb_obj_invalidate - invalidate frontbuffer object
 * @obj: GEM object to invalidate
 * @ring: set for asynchronous rendering
 *
 * This function gets called every time rendering on the given object starts and
 * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
 * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
 * until the rendering completes or a flip on this frontbuffer plane is
 * scheduled.
 */
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
			     struct intel_engine_cs *ring)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (!obj->frontbuffer_bits)
		return;

	if (ring) {
		mutex_lock(&dev_priv->fb_tracking.lock);
		dev_priv->fb_tracking.busy_bits
			|= obj->frontbuffer_bits;
		dev_priv->fb_tracking.flip_bits
			&= ~obj->frontbuffer_bits;
		mutex_unlock(&dev_priv->fb_tracking.lock);
	}

	intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);

	intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
}

/**
 * intel_frontbuffer_flush - flush frontbuffer
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed and frontbuffer caching can be started again. Flushes will get
 * delayed if they're blocked by some outstanding asynchronous rendering.
 *
 * Can be called without any locks held.
 */
void intel_frontbuffer_flush(struct drm_device *dev,
			     unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Delay flushing when rings are still busy. */
	mutex_lock(&dev_priv->fb_tracking.lock);
	frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
	mutex_unlock(&dev_priv->fb_tracking.lock);

	intel_mark_fb_busy(dev, frontbuffer_bits, NULL);

	intel_edp_psr_flush(dev, frontbuffer_bits);
}
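
/*
 * Illustrative note (not part of the original driver): the invalidate/flush
 * pair above tracks per-plane frontbuffer state with two bitmasks. A
 * simplified sketch of the life cycle of one plane bit, assuming the same
 * INTEL_FRONTBUFFER_* bit encoding used above:
 *
 *	render starts -> intel_fb_obj_invalidate(): busy_bits |= bit
 *	flush request -> intel_frontbuffer_flush(): the bit is masked out
 *	                 while it is still set in busy_bits, so the flush
 *	                 is deferred until the outstanding rendering retires
 *
 * In other words, a flush only reaches PSR/FBC once no asynchronous
 * rendering is still pending on that plane.
 */
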
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_unpin_work *work;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (work) {
		cancel_work_sync(&work->work);
		kfree(work);
	}

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}

#if 0
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_unpin_work *work =
		container_of(__work, struct intel_unpin_work, work);
	struct drm_device *dev = work->crtc->dev;
	enum pipe pipe = to_intel_crtc(work->crtc)->pipe;

	mutex_lock(&dev->struct_mutex);
	intel_unpin_fb_obj(work->old_fb_obj);
	drm_gem_object_unreference(&work->pending_flip_obj->base);
	drm_gem_object_unreference(&work->old_fb_obj->base);

	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
	atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);

	kfree(work);
}

static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	/* Ensure we don't miss a work->pending update ... */
	smp_rmb();

	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* and that the unpin work is consistent wrt ->pending. */
	smp_rmb();

	intel_crtc->unpin_work = NULL;

	if (work->event)
		drm_send_vblank_event(dev, intel_crtc->pipe, work->event);

	drm_crtc_vblank_put(crtc);

	spin_unlock_irqrestore(&dev->event_lock, flags);

	wake_up_all(&dev_priv->pending_flip_queue);

	queue_work(dev_priv->wq, &work->work);

	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
}

void intel_finish_page_flip(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];

	do_intel_finish_page_flip(dev, crtc);
}

void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];

	do_intel_finish_page_flip(dev, crtc);
}

/* Is 'a' after or equal to 'b'? */
static bool g4x_flip_count_after_eq(u32 a, u32 b)
{
	return !((a - b) & 0x80000000);
}
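
/*
 * Illustrative note (not part of the original driver): the unsigned
 * subtraction in g4x_flip_count_after_eq() is wraparound safe. With
 * hypothetical counter values a = 0x00000002 and b = 0xfffffffe (i.e. the
 * 32-bit flip counter wrapped between them):
 *
 *	a - b = 0x00000004, top bit clear -> 'a' is treated as after 'b'
 *
 * whereas a plain 'a >= b' comparison would get this case wrong. The same
 * idiom underlies the kernel's time_after()-style helpers.
 */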

static bool page_flip_finished(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * The relevant registers don't exist on pre-ctg.
	 * As the flip done interrupt doesn't trigger for mmio
	 * flips on gmch platforms, a flip count check isn't
	 * really needed there. But since ctg has the registers,
	 * include it in the check anyway.
	 */
	if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
		return true;

	/*
	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
	 * used the same base address. In that case the mmio flip might
	 * have completed, but the CS hasn't even executed the flip yet.
	 *
	 * A flip count check isn't enough as the CS might have updated
	 * the base address just after start of vblank, but before we
	 * managed to process the interrupt. This means we'd complete the
	 * CS flip too soon.
	 *
	 * Combining both checks should get us a good enough result. It may
	 * still happen that the CS flip has been executed, but has not
	 * yet actually completed. But in case the base address is the same
	 * anyway, we don't really care.
	 */
	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
		crtc->unpin_work->gtt_offset &&
		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_GM45(crtc->pipe)),
				    crtc->unpin_work->flip_count);
}

void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
	unsigned long flags;

	/* NB: An MMIO update of the plane base pointer will also
	 * generate a page-flip completion irq, i.e. every modeset
	 * is also accompanied by a spurious intel_prepare_page_flip().
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
{
	/* Ensure that the work item is consistent when activating it ... */
	smp_wmb();
	atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
	/* and that it is marked active as soon as the irq could fire. */
	smp_wmb();
}

static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct intel_engine_cs *ring,
				 uint32_t flags)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, 0); /* aux display base address, unused */

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;
}

static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct intel_engine_cs *ring,
				 uint32_t flags)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, MI_NOOP);

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;
}

static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct intel_engine_cs *ring,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
			obj->tiling_mode);

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;
}

static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct intel_engine_cs *ring,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);

	/* Contrary to the suggestions in the documentation,
	 * "Enable Panel Fitter" does not seem to be required when page
	 * flipping with a non-native mode, and worse causes a normal
	 * modeset to fail.
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;
}

static int intel_gen7_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct intel_engine_cs *ring,
				 uint32_t flags)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t plane_bit = 0;
	int len, ret;

	switch (intel_crtc->plane) {
	case PLANE_A:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
		break;
	case PLANE_B:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
		break;
	case PLANE_C:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
		break;
	default:
		WARN_ONCE(1, "unknown plane in flip command\n");
		return -ENODEV;
	}

	len = 4;
	if (ring->id == RCS) {
		len += 6;
		/*
		 * On Gen 8, SRM is now taking an extra dword to accommodate
		 * 48-bit addresses, and we need a NOOP for the batch size to
		 * stay even.
		 */
		if (IS_GEN8(dev))
			len += 2;
	}

	/*
	 * BSpec MI_DISPLAY_FLIP for IVB:
	 * "The full packet must be contained within the same cache line."
	 *
	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
	 * cacheline, if we ever start emitting more commands before
	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
	 * then do the cacheline alignment, and finally emit the
	 * MI_DISPLAY_FLIP.
	 */
	ret = intel_ring_cacheline_align(ring);
	if (ret)
		return ret;

	ret = intel_ring_begin(ring, len);
	if (ret)
		return ret;

	/* Unmask the flip-done completion message. Note that the bspec says that
	 * we should do this for both the BCS and RCS, and that we must not unmask
	 * more than one flip event at any time (or ensure that one flip message
	 * can be sent by waiting for flip-done prior to queueing new flips).
	 * Experimentation says that BCS works despite DERRMR masking all
	 * flip-done completion events and that unmasking all planes at once
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
	 */
	if (ring->id == RCS) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, DERRMR);
		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
					DERRMR_PIPEB_PRI_FLIP_DONE |
					DERRMR_PIPEC_PRI_FLIP_DONE));
		if (IS_GEN8(dev))
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
					      MI_SRM_LRM_GLOBAL_GTT);
		else
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
					      MI_SRM_LRM_GLOBAL_GTT);
		intel_ring_emit(ring, DERRMR);
		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
		if (IS_GEN8(dev)) {
			intel_ring_emit(ring, 0);
			intel_ring_emit(ring, MI_NOOP);
		}
	}

	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, (MI_NOOP));

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;
}

static int intel_default_queue_flip(struct drm_device *dev,
				    struct drm_crtc *crtc,
				    struct drm_framebuffer *fb,
				    struct drm_i915_gem_object *obj,
				    struct intel_engine_cs *ring,
				    uint32_t flags)
{
	return -ENODEV;
}

static int intel_crtc_page_flip(struct drm_crtc *crtc,
9436
static int intel_crtc_page_flip(struct drm_crtc *crtc,
9414
				struct drm_framebuffer *fb,
9437
				struct drm_framebuffer *fb,
9415
				struct drm_pending_vblank_event *event,
9438
				struct drm_pending_vblank_event *event,
9416
				uint32_t page_flip_flags)
9439
				uint32_t page_flip_flags)
9417
{
9440
{
9418
	struct drm_device *dev = crtc->dev;
9441
	struct drm_device *dev = crtc->dev;
9419
	struct drm_i915_private *dev_priv = dev->dev_private;
9442
	struct drm_i915_private *dev_priv = dev->dev_private;
9420
	struct drm_framebuffer *old_fb = crtc->primary->fb;
9443
	struct drm_framebuffer *old_fb = crtc->primary->fb;
9421
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
9444
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
9422
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9445
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9423
	enum pipe pipe = intel_crtc->pipe;
9446
	enum pipe pipe = intel_crtc->pipe;
9424
	struct intel_unpin_work *work;
9447
	struct intel_unpin_work *work;
9425
	struct intel_engine_cs *ring;
9448
	struct intel_engine_cs *ring;
9426
	unsigned long flags;
9449
	unsigned long flags;
9427
	int ret;
9450
	int ret;
9428
 
9451
 
9429
	/*
9452
	/*
9430
	 * drm_mode_page_flip_ioctl() should already catch this, but double
9453
	 * drm_mode_page_flip_ioctl() should already catch this, but double
9431
	 * check to be safe.  In the future we may enable pageflipping from
9454
	 * check to be safe.  In the future we may enable pageflipping from
9432
	 * a disabled primary plane.
9455
	 * a disabled primary plane.
9433
	 */
9456
	 */
9434
	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
9457
	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
9435
		return -EBUSY;
9458
		return -EBUSY;
9436
 
9459
 
9437
	/* Can't change pixel format via MI display flips. */
9460
	/* Can't change pixel format via MI display flips. */
9438
	if (fb->pixel_format != crtc->primary->fb->pixel_format)
9461
	if (fb->pixel_format != crtc->primary->fb->pixel_format)
9439
		return -EINVAL;
9462
		return -EINVAL;
9440
 
9463
 
9441
	/*
9464
	/*
9442
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
9465
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
9443
	 * Note that pitch changes could also affect these registers.
9466
	 * Note that pitch changes could also affect these registers.
9444
	 */
9467
	 */
9445
	if (INTEL_INFO(dev)->gen > 3 &&
9468
	if (INTEL_INFO(dev)->gen > 3 &&
9446
	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
9469
	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
9447
	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
9470
	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
9448
		return -EINVAL;
9471
		return -EINVAL;
9449
 
9472
 
9450
	work = kzalloc(sizeof(*work), GFP_KERNEL);
9473
	work = kzalloc(sizeof(*work), GFP_KERNEL);
9451
	if (work == NULL)
9474
	if (work == NULL)
9452
		return -ENOMEM;
9475
		return -ENOMEM;
9453
 
9476
 
9454
	work->event = event;
9477
	work->event = event;
9455
	work->crtc = crtc;
9478
	work->crtc = crtc;
9456
	work->old_fb_obj = intel_fb_obj(old_fb);
9479
	work->old_fb_obj = intel_fb_obj(old_fb);
9457
	INIT_WORK(&work->work, intel_unpin_work_fn);
9480
	INIT_WORK(&work->work, intel_unpin_work_fn);
9458
 
9481
 
9459
	ret = drm_crtc_vblank_get(crtc);
9482
	ret = drm_crtc_vblank_get(crtc);
9460
	if (ret)
9483
	if (ret)
9461
		goto free_work;
9484
		goto free_work;
9462
 
9485
 
9463
	/* We borrow the event spin lock for protecting unpin_work */
9486
	/* We borrow the event spin lock for protecting unpin_work */
9464
	spin_lock_irqsave(&dev->event_lock, flags);
9487
	spin_lock_irqsave(&dev->event_lock, flags);
9465
	if (intel_crtc->unpin_work) {
9488
	if (intel_crtc->unpin_work) {
9466
		spin_unlock_irqrestore(&dev->event_lock, flags);
9489
		spin_unlock_irqrestore(&dev->event_lock, flags);
9467
		kfree(work);
9490
		kfree(work);
9468
		drm_crtc_vblank_put(crtc);
9491
		drm_crtc_vblank_put(crtc);
9469
 
9492
 
9470
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
9493
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
9471
		return -EBUSY;
9494
		return -EBUSY;
9472
	}
9495
	}
9473
	intel_crtc->unpin_work = work;
9496
	intel_crtc->unpin_work = work;
9474
	spin_unlock_irqrestore(&dev->event_lock, flags);
9497
	spin_unlock_irqrestore(&dev->event_lock, flags);
9475
 
9498
 
9476
	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
9499
	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
9477
		flush_workqueue(dev_priv->wq);
9500
		flush_workqueue(dev_priv->wq);
9478
 
9501
 
9479
	ret = i915_mutex_lock_interruptible(dev);
9502
	ret = i915_mutex_lock_interruptible(dev);
9480
	if (ret)
9503
	if (ret)
9481
		goto cleanup;
9504
		goto cleanup;
9482
 
9505
 
9483
	/* Reference the objects for the scheduled work. */
9506
	/* Reference the objects for the scheduled work. */
9484
	drm_gem_object_reference(&work->old_fb_obj->base);
9507
	drm_gem_object_reference(&work->old_fb_obj->base);
9485
	drm_gem_object_reference(&obj->base);
9508
	drm_gem_object_reference(&obj->base);
9486
 
9509
 
9487
	crtc->primary->fb = fb;
9510
	crtc->primary->fb = fb;
9488
 
9511
 
9489
	work->pending_flip_obj = obj;
9512
	work->pending_flip_obj = obj;
9490
 
9513
 
9491
	work->enable_stall_check = true;
9514
	work->enable_stall_check = true;
9492
 
9515
 
9493
	atomic_inc(&intel_crtc->unpin_work_count);
9516
	atomic_inc(&intel_crtc->unpin_work_count);
9494
	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
9517
	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
9495
 
9518
 
9496
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
9519
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
9497
		work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1;
9520
		work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1;
9498
 
9521
 
9499
	if (IS_VALLEYVIEW(dev)) {
9522
	if (IS_VALLEYVIEW(dev)) {
9500
		ring = &dev_priv->ring[BCS];
9523
		ring = &dev_priv->ring[BCS];
9501
		if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
9524
		if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
9502
			/* vlv: DISPLAY_FLIP fails to change tiling */
9525
			/* vlv: DISPLAY_FLIP fails to change tiling */
9503
			ring = NULL;
9526
			ring = NULL;
9504
	} else if (IS_IVYBRIDGE(dev)) {
9527
	} else if (IS_IVYBRIDGE(dev)) {
9505
		ring = &dev_priv->ring[BCS];
9528
		ring = &dev_priv->ring[BCS];
9506
	} else if (INTEL_INFO(dev)->gen >= 7) {
9529
	} else if (INTEL_INFO(dev)->gen >= 7) {
9507
		ring = obj->ring;
9530
		ring = obj->ring;
9508
		if (ring == NULL || ring->id != RCS)
9531
		if (ring == NULL || ring->id != RCS)
9509
			ring = &dev_priv->ring[BCS];
9532
			ring = &dev_priv->ring[BCS];
9510
	} else {
9533
	} else {
9511
		ring = &dev_priv->ring[RCS];
9534
		ring = &dev_priv->ring[RCS];
9512
	}
9535
	}
9513
 
9536
 
9514
	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
9537
	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
9515
	if (ret)
9538
	if (ret)
9516
		goto cleanup_pending;
9539
		goto cleanup_pending;
9517
 
9540
 
9518
	work->gtt_offset =
9541
	work->gtt_offset =
9519
		i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
9542
		i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
9520
 
9543
 
9521
	if (use_mmio_flip(ring, obj))
9544
	if (use_mmio_flip(ring, obj))
9522
		ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
9545
		ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
9523
					    page_flip_flags);
9546
					    page_flip_flags);
9524
	else
9547
	else
9525
		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
9548
		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
9526
				page_flip_flags);
9549
				page_flip_flags);
9527
	if (ret)
9550
	if (ret)
9528
		goto cleanup_unpin;
9551
		goto cleanup_unpin;
9529
 
9552
 
9530
	i915_gem_track_fb(work->old_fb_obj, obj,
9553
	i915_gem_track_fb(work->old_fb_obj, obj,
9531
			  INTEL_FRONTBUFFER_PRIMARY(pipe));
9554
			  INTEL_FRONTBUFFER_PRIMARY(pipe));
9532
 
9555
 
9533
	intel_disable_fbc(dev);
9556
	intel_disable_fbc(dev);
9534
	intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
9557
	intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
9535
	mutex_unlock(&dev->struct_mutex);
9558
	mutex_unlock(&dev->struct_mutex);
9536
 
9559
 
9537
	trace_i915_flip_request(intel_crtc->plane, obj);
9560
	trace_i915_flip_request(intel_crtc->plane, obj);
9538
 
9561
 
9539
	return 0;
9562
	return 0;
9540
 
9563
 
9541
cleanup_unpin:
9564
cleanup_unpin:
9542
	intel_unpin_fb_obj(obj);
9565
	intel_unpin_fb_obj(obj);
9543
cleanup_pending:
9566
cleanup_pending:
9544
	atomic_dec(&intel_crtc->unpin_work_count);
9567
	atomic_dec(&intel_crtc->unpin_work_count);
9545
	crtc->primary->fb = old_fb;
9568
	crtc->primary->fb = old_fb;
9546
	drm_gem_object_unreference(&work->old_fb_obj->base);
9569
	drm_gem_object_unreference(&work->old_fb_obj->base);
9547
	drm_gem_object_unreference(&obj->base);
9570
	drm_gem_object_unreference(&obj->base);
9548
	mutex_unlock(&dev->struct_mutex);
9571
	mutex_unlock(&dev->struct_mutex);
9549
 
9572
 
9550
cleanup:
9573
cleanup:
9551
	spin_lock_irqsave(&dev->event_lock, flags);
9574
	spin_lock_irqsave(&dev->event_lock, flags);
9552
	intel_crtc->unpin_work = NULL;
9575
	intel_crtc->unpin_work = NULL;
9553
	spin_unlock_irqrestore(&dev->event_lock, flags);
9576
	spin_unlock_irqrestore(&dev->event_lock, flags);
9554
 
9577
 
9555
	drm_crtc_vblank_put(crtc);
9578
	drm_crtc_vblank_put(crtc);
9556
free_work:
9579
free_work:
9557
	kfree(work);
9580
	kfree(work);
9558
 
9581
 
9559
	if (ret == -EIO) {
9582
	if (ret == -EIO) {
9560
out_hang:
9583
out_hang:
9561
		intel_crtc_wait_for_pending_flips(crtc);
9584
		intel_crtc_wait_for_pending_flips(crtc);
9562
		ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
9585
		ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
9563
		if (ret == 0 && event)
9586
		if (ret == 0 && event)
9564
			drm_send_vblank_event(dev, pipe, event);
9587
			drm_send_vblank_event(dev, pipe, event);
9565
	}
9588
	}
9566
	return ret;
9589
	return ret;
9567
}
9590
}
9568
#endif
9591
#endif
9569
 
9592
 
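/*
 * Illustrative note, not part of the original file: the page flip path above
 * is what services DRM_IOCTL_MODE_PAGE_FLIP from userspace.  A minimal
 * userspace-side sketch, assuming fd, crtc_id and next_fb_id were obtained
 * earlier (drmModeGetResources()/drmModeAddFB()) and with error handling
 * omitted, queues a flip and waits for the completion event:
 *
 *	#include <xf86drm.h>
 *	#include <xf86drmMode.h>
 *
 *	static void flip_done(int fd, unsigned int seq, unsigned int sec,
 *			      unsigned int usec, void *data)
 *	{
 *		*(int *)data = 1;	// buffer is now being scanned out
 *	}
 *
 *	int done = 0;
 *	drmEventContext evctx = {
 *		.version = DRM_EVENT_CONTEXT_VERSION,
 *		.page_flip_handler = flip_done,
 *	};
 *
 *	drmModePageFlip(fd, crtc_id, next_fb_id,
 *			DRM_MODE_PAGE_FLIP_EVENT, &done);
 *	while (!done)
 *		drmHandleEvent(fd, &evctx);	// a real program would poll() first
 */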
9570
static struct drm_crtc_helper_funcs intel_helper_funcs = {
9593
static struct drm_crtc_helper_funcs intel_helper_funcs = {
9571
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
9594
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
9572
	.load_lut = intel_crtc_load_lut,
9595
	.load_lut = intel_crtc_load_lut,
9573
};
9596
};
9574
 
9597
 
9575
/**
9598
/**
9576
 * intel_modeset_update_staged_output_state
9599
 * intel_modeset_update_staged_output_state
9577
 *
9600
 *
9578
 * Updates the staged output configuration state, e.g. after we've read out the
9601
 * Updates the staged output configuration state, e.g. after we've read out the
9579
 * current hw state.
9602
 * current hw state.
9580
 */
9603
 */
9581
static void intel_modeset_update_staged_output_state(struct drm_device *dev)
9604
static void intel_modeset_update_staged_output_state(struct drm_device *dev)
9582
{
9605
{
9583
	struct intel_crtc *crtc;
9606
	struct intel_crtc *crtc;
9584
	struct intel_encoder *encoder;
9607
	struct intel_encoder *encoder;
9585
	struct intel_connector *connector;
9608
	struct intel_connector *connector;
9586
 
9609
 
9587
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9610
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9588
			    base.head) {
9611
			    base.head) {
9589
		connector->new_encoder =
9612
		connector->new_encoder =
9590
			to_intel_encoder(connector->base.encoder);
9613
			to_intel_encoder(connector->base.encoder);
9591
	}
9614
	}
9592
 
9615
 
9593
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9616
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9594
			    base.head) {
9617
			    base.head) {
9595
		encoder->new_crtc =
9618
		encoder->new_crtc =
9596
			to_intel_crtc(encoder->base.crtc);
9619
			to_intel_crtc(encoder->base.crtc);
9597
	}
9620
	}
9598
 
9621
 
9599
	for_each_intel_crtc(dev, crtc) {
9622
	for_each_intel_crtc(dev, crtc) {
9600
		crtc->new_enabled = crtc->base.enabled;
9623
		crtc->new_enabled = crtc->base.enabled;
9601
 
9624
 
9602
		if (crtc->new_enabled)
9625
		if (crtc->new_enabled)
9603
			crtc->new_config = &crtc->config;
9626
			crtc->new_config = &crtc->config;
9604
		else
9627
		else
9605
			crtc->new_config = NULL;
9628
			crtc->new_config = NULL;
9606
	}
9629
	}
9607
}
9630
}
9608
 
9631
 
9609
/**
9632
/**
9610
 * intel_modeset_commit_output_state
9633
 * intel_modeset_commit_output_state
9611
 *
9634
 *
9612
 * This function copies the staged display pipe configuration to the real one.
9635
 * This function copies the staged display pipe configuration to the real one.
9613
 */
9636
 */
9614
static void intel_modeset_commit_output_state(struct drm_device *dev)
9637
static void intel_modeset_commit_output_state(struct drm_device *dev)
9615
{
9638
{
9616
	struct intel_crtc *crtc;
9639
	struct intel_crtc *crtc;
9617
	struct intel_encoder *encoder;
9640
	struct intel_encoder *encoder;
9618
	struct intel_connector *connector;
9641
	struct intel_connector *connector;
9619
 
9642
 
9620
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9643
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9621
			    base.head) {
9644
			    base.head) {
9622
		connector->base.encoder = &connector->new_encoder->base;
9645
		connector->base.encoder = &connector->new_encoder->base;
9623
	}
9646
	}
9624
 
9647
 
9625
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9648
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9626
			    base.head) {
9649
			    base.head) {
9627
		encoder->base.crtc = &encoder->new_crtc->base;
9650
		encoder->base.crtc = &encoder->new_crtc->base;
9628
	}
9651
	}
9629
 
9652
 
9630
	for_each_intel_crtc(dev, crtc) {
9653
	for_each_intel_crtc(dev, crtc) {
9631
		crtc->base.enabled = crtc->new_enabled;
9654
		crtc->base.enabled = crtc->new_enabled;
9632
	}
9655
	}
9633
}
9656
}
9634
 
9657
 
9635
static void
9658
static void
9636
connected_sink_compute_bpp(struct intel_connector *connector,
9659
connected_sink_compute_bpp(struct intel_connector *connector,
9637
			   struct intel_crtc_config *pipe_config)
9660
			   struct intel_crtc_config *pipe_config)
9638
{
9661
{
9639
	int bpp = pipe_config->pipe_bpp;
9662
	int bpp = pipe_config->pipe_bpp;
9640
 
9663
 
9641
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
9664
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
9642
		connector->base.base.id,
9665
		connector->base.base.id,
9643
		connector->base.name);
9666
		connector->base.name);
9644
 
9667
 
9645
	/* Don't use an invalid EDID bpc value */
9668
	/* Don't use an invalid EDID bpc value */
9646
	if (connector->base.display_info.bpc &&
9669
	if (connector->base.display_info.bpc &&
9647
	    connector->base.display_info.bpc * 3 < bpp) {
9670
	    connector->base.display_info.bpc * 3 < bpp) {
9648
		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
9671
		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
9649
			      bpp, connector->base.display_info.bpc*3);
9672
			      bpp, connector->base.display_info.bpc*3);
9650
		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
9673
		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
9651
	}
9674
	}
9652
 
9675
 
9653
	/* Clamp bpp to 8 on screens without EDID 1.4 */
9676
	/* Clamp bpp to 8 on screens without EDID 1.4 */
9654
	if (connector->base.display_info.bpc == 0 && bpp > 24) {
9677
	if (connector->base.display_info.bpc == 0 && bpp > 24) {
9655
		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
9678
		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
9656
			      bpp);
9679
			      bpp);
9657
		pipe_config->pipe_bpp = 24;
9680
		pipe_config->pipe_bpp = 24;
9658
	}
9681
	}
9659
}
9682
}
9660
 
9683
 
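/*
 * Illustrative sketch, not part of the original file: the two clamps above
 * reduce to the rule below.  example_clamp_pipe_bpp() is a hypothetical
 * helper name; e.g. a 6 bpc panel (6 * 3 = 18) pulls a 24 bpp pipe down to
 * 18 bpp, while a sink without usable EDID bpc information caps the pipe
 * at 24 bpp.
 */
static inline int example_clamp_pipe_bpp(int pipe_bpp, int sink_bpc)
{
	if (sink_bpc && sink_bpc * 3 < pipe_bpp)
		return sink_bpc * 3;	/* clamp to the EDID-reported depth */
	if (sink_bpc == 0 && pipe_bpp > 24)
		return 24;		/* no EDID 1.4 bpc info: cap at 8 bpc */
	return pipe_bpp;
}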
9661
static int
9684
static int
9662
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
9685
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
9663
		    struct drm_framebuffer *fb,
9686
		    struct drm_framebuffer *fb,
9664
		    struct intel_crtc_config *pipe_config)
9687
		    struct intel_crtc_config *pipe_config)
9665
{
9688
{
9666
	struct drm_device *dev = crtc->base.dev;
9689
	struct drm_device *dev = crtc->base.dev;
9667
	struct intel_connector *connector;
9690
	struct intel_connector *connector;
9668
	int bpp;
9691
	int bpp;
9669
 
9692
 
9670
	switch (fb->pixel_format) {
9693
	switch (fb->pixel_format) {
9671
	case DRM_FORMAT_C8:
9694
	case DRM_FORMAT_C8:
9672
		bpp = 8*3; /* since we go through a colormap */
9695
		bpp = 8*3; /* since we go through a colormap */
9673
		break;
9696
		break;
9674
	case DRM_FORMAT_XRGB1555:
9697
	case DRM_FORMAT_XRGB1555:
9675
	case DRM_FORMAT_ARGB1555:
9698
	case DRM_FORMAT_ARGB1555:
9676
		/* checked in intel_framebuffer_init already */
9699
		/* checked in intel_framebuffer_init already */
9677
		if (WARN_ON(INTEL_INFO(dev)->gen > 3))
9700
		if (WARN_ON(INTEL_INFO(dev)->gen > 3))
9678
			return -EINVAL;
9701
			return -EINVAL;
9679
	case DRM_FORMAT_RGB565:
9702
	case DRM_FORMAT_RGB565:
9680
		bpp = 6*3; /* min is 18bpp */
9703
		bpp = 6*3; /* min is 18bpp */
9681
		break;
9704
		break;
9682
	case DRM_FORMAT_XBGR8888:
9705
	case DRM_FORMAT_XBGR8888:
9683
	case DRM_FORMAT_ABGR8888:
9706
	case DRM_FORMAT_ABGR8888:
9684
		/* checked in intel_framebuffer_init already */
9707
		/* checked in intel_framebuffer_init already */
9685
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
9708
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
9686
			return -EINVAL;
9709
			return -EINVAL;
9687
	case DRM_FORMAT_XRGB8888:
9710
	case DRM_FORMAT_XRGB8888:
9688
	case DRM_FORMAT_ARGB8888:
9711
	case DRM_FORMAT_ARGB8888:
9689
		bpp = 8*3;
9712
		bpp = 8*3;
9690
		break;
9713
		break;
9691
	case DRM_FORMAT_XRGB2101010:
9714
	case DRM_FORMAT_XRGB2101010:
9692
	case DRM_FORMAT_ARGB2101010:
9715
	case DRM_FORMAT_ARGB2101010:
9693
	case DRM_FORMAT_XBGR2101010:
9716
	case DRM_FORMAT_XBGR2101010:
9694
	case DRM_FORMAT_ABGR2101010:
9717
	case DRM_FORMAT_ABGR2101010:
9695
		/* checked in intel_framebuffer_init already */
9718
		/* checked in intel_framebuffer_init already */
9696
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
9719
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
9697
			return -EINVAL;
9720
			return -EINVAL;
9698
		bpp = 10*3;
9721
		bpp = 10*3;
9699
		break;
9722
		break;
9700
	/* TODO: gen4+ supports 16 bpc floating point, too. */
9723
	/* TODO: gen4+ supports 16 bpc floating point, too. */
9701
	default:
9724
	default:
9702
		DRM_DEBUG_KMS("unsupported depth\n");
9725
		DRM_DEBUG_KMS("unsupported depth\n");
9703
		return -EINVAL;
9726
		return -EINVAL;
9704
	}
9727
	}
9705
 
9728
 
9706
	pipe_config->pipe_bpp = bpp;
9729
	pipe_config->pipe_bpp = bpp;
9707
 
9730
 
9708
	/* Clamp display bpp to EDID value */
9731
	/* Clamp display bpp to EDID value */
9709
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9732
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9710
			    base.head) {
9733
			    base.head) {
9711
		if (!connector->new_encoder ||
9734
		if (!connector->new_encoder ||
9712
		    connector->new_encoder->new_crtc != crtc)
9735
		    connector->new_encoder->new_crtc != crtc)
9713
			continue;
9736
			continue;
9714
 
9737
 
9715
		connected_sink_compute_bpp(connector, pipe_config);
9738
		connected_sink_compute_bpp(connector, pipe_config);
9716
	}
9739
	}
9717
 
9740
 
9718
	return bpp;
9741
	return bpp;
9719
}
9742
}
9720
 
9743
 
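/*
 * Illustrative sketch, not part of the original file: the baseline bpp
 * returned above is later compared against the clamped pipe bpp to decide
 * whether to dither (see intel_modeset_pipe_config() below).
 * example_needs_dither() is a hypothetical helper: an XRGB8888 plane
 * (8 * 3 = 24) driving an 18 bpp pipe (6 bpc panel) enables dithering.
 */
static inline bool example_needs_dither(int plane_bpp, int pipe_bpp)
{
	return pipe_bpp != plane_bpp;	/* e.g. 18 != 24 -> dither */
}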
9721
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
9744
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
9722
{
9745
{
9723
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
9746
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
9724
			"type: 0x%x flags: 0x%x\n",
9747
			"type: 0x%x flags: 0x%x\n",
9725
		mode->crtc_clock,
9748
		mode->crtc_clock,
9726
		mode->crtc_hdisplay, mode->crtc_hsync_start,
9749
		mode->crtc_hdisplay, mode->crtc_hsync_start,
9727
		mode->crtc_hsync_end, mode->crtc_htotal,
9750
		mode->crtc_hsync_end, mode->crtc_htotal,
9728
		mode->crtc_vdisplay, mode->crtc_vsync_start,
9751
		mode->crtc_vdisplay, mode->crtc_vsync_start,
9729
		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
9752
		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
9730
}
9753
}
9731
 
9754
 
9732
static void intel_dump_pipe_config(struct intel_crtc *crtc,
9755
static void intel_dump_pipe_config(struct intel_crtc *crtc,
9733
				   struct intel_crtc_config *pipe_config,
9756
				   struct intel_crtc_config *pipe_config,
9734
				   const char *context)
9757
				   const char *context)
9735
{
9758
{
9736
	DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
9759
	DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
9737
		      context, pipe_name(crtc->pipe));
9760
		      context, pipe_name(crtc->pipe));
9738
 
9761
 
9739
	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
9762
	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
9740
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
9763
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
9741
		      pipe_config->pipe_bpp, pipe_config->dither);
9764
		      pipe_config->pipe_bpp, pipe_config->dither);
9742
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
9765
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
9743
		      pipe_config->has_pch_encoder,
9766
		      pipe_config->has_pch_encoder,
9744
		      pipe_config->fdi_lanes,
9767
		      pipe_config->fdi_lanes,
9745
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
9768
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
9746
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
9769
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
9747
		      pipe_config->fdi_m_n.tu);
9770
		      pipe_config->fdi_m_n.tu);
9748
	DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
9771
	DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
9749
		      pipe_config->has_dp_encoder,
9772
		      pipe_config->has_dp_encoder,
9750
		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
9773
		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
9751
		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
9774
		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
9752
		      pipe_config->dp_m_n.tu);
9775
		      pipe_config->dp_m_n.tu);
9753
	DRM_DEBUG_KMS("requested mode:\n");
9776
	DRM_DEBUG_KMS("requested mode:\n");
9754
	drm_mode_debug_printmodeline(&pipe_config->requested_mode);
9777
	drm_mode_debug_printmodeline(&pipe_config->requested_mode);
9755
	DRM_DEBUG_KMS("adjusted mode:\n");
9778
	DRM_DEBUG_KMS("adjusted mode:\n");
9756
	drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
9779
	drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
9757
	intel_dump_crtc_timings(&pipe_config->adjusted_mode);
9780
	intel_dump_crtc_timings(&pipe_config->adjusted_mode);
9758
	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
9781
	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
9759
	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
9782
	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
9760
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
9783
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
9761
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
9784
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
9762
		      pipe_config->gmch_pfit.control,
9785
		      pipe_config->gmch_pfit.control,
9763
		      pipe_config->gmch_pfit.pgm_ratios,
9786
		      pipe_config->gmch_pfit.pgm_ratios,
9764
		      pipe_config->gmch_pfit.lvds_border_bits);
9787
		      pipe_config->gmch_pfit.lvds_border_bits);
9765
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
9788
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
9766
		      pipe_config->pch_pfit.pos,
9789
		      pipe_config->pch_pfit.pos,
9767
		      pipe_config->pch_pfit.size,
9790
		      pipe_config->pch_pfit.size,
9768
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
9791
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
9769
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
9792
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
9770
	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
9793
	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
9771
}
9794
}
9772
 
9795
 
9773
static bool encoders_cloneable(const struct intel_encoder *a,
9796
static bool encoders_cloneable(const struct intel_encoder *a,
9774
			       const struct intel_encoder *b)
9797
			       const struct intel_encoder *b)
9775
{
9798
{
9776
	/* masks could be asymmetric, so check both ways */
9799
	/* masks could be asymmetric, so check both ways */
9777
	return a == b || (a->cloneable & (1 << b->type) &&
9800
	return a == b || (a->cloneable & (1 << b->type) &&
9778
			  b->cloneable & (1 << a->type));
9801
			  b->cloneable & (1 << a->type));
9779
}
9802
}
9780
 
9803
 
9781
static bool check_single_encoder_cloning(struct intel_crtc *crtc,
9804
static bool check_single_encoder_cloning(struct intel_crtc *crtc,
9782
					 struct intel_encoder *encoder)
9805
					 struct intel_encoder *encoder)
9783
{
9806
{
9784
	struct drm_device *dev = crtc->base.dev;
9807
	struct drm_device *dev = crtc->base.dev;
9785
	struct intel_encoder *source_encoder;
9808
	struct intel_encoder *source_encoder;
9786
 
9809
 
9787
	list_for_each_entry(source_encoder,
9810
	list_for_each_entry(source_encoder,
9788
			    &dev->mode_config.encoder_list, base.head) {
9811
			    &dev->mode_config.encoder_list, base.head) {
9789
		if (source_encoder->new_crtc != crtc)
9812
		if (source_encoder->new_crtc != crtc)
9790
			continue;
9813
			continue;
9791
 
9814
 
9792
		if (!encoders_cloneable(encoder, source_encoder))
9815
		if (!encoders_cloneable(encoder, source_encoder))
9793
			return false;
9816
			return false;
9794
	}
9817
	}
9795
 
9818
 
9796
	return true;
9819
	return true;
9797
}
9820
}
9798
 
9821
 
9799
static bool check_encoder_cloning(struct intel_crtc *crtc)
9822
static bool check_encoder_cloning(struct intel_crtc *crtc)
9800
{
9823
{
9801
	struct drm_device *dev = crtc->base.dev;
9824
	struct drm_device *dev = crtc->base.dev;
9802
	struct intel_encoder *encoder;
9825
	struct intel_encoder *encoder;
9803
 
9826
 
9804
	list_for_each_entry(encoder,
9827
	list_for_each_entry(encoder,
9805
			    &dev->mode_config.encoder_list, base.head) {
9828
			    &dev->mode_config.encoder_list, base.head) {
9806
		if (encoder->new_crtc != crtc)
9829
		if (encoder->new_crtc != crtc)
9807
			continue;
9830
			continue;
9808
 
9831
 
9809
		if (!check_single_encoder_cloning(crtc, encoder))
9832
		if (!check_single_encoder_cloning(crtc, encoder))
9810
			return false;
9833
			return false;
9811
	}
9834
	}
9812
 
9835
 
9813
	return true;
9836
	return true;
9814
}
9837
}
9815
 
9838
 
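/*
 * Illustrative sketch, not part of the original file: encoders_cloneable()
 * above is a symmetric bitmask test (plus the trivial a == b case).
 * example_masks_cloneable() is a hypothetical stand-alone version: with
 * type_a = 1, type_b = 2, mask_a = (1 << 2) and mask_b = (1 << 1) both
 * directions are permitted and the encoders may share a crtc; clearing
 * either bit rejects the cloning configuration.
 */
static inline bool example_masks_cloneable(unsigned int mask_a, int type_a,
					   unsigned int mask_b, int type_b)
{
	return (mask_a & (1 << type_b)) && (mask_b & (1 << type_a));
}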
9816
static struct intel_crtc_config *
9839
static struct intel_crtc_config *
9817
intel_modeset_pipe_config(struct drm_crtc *crtc,
9840
intel_modeset_pipe_config(struct drm_crtc *crtc,
9818
			  struct drm_framebuffer *fb,
9841
			  struct drm_framebuffer *fb,
9819
			    struct drm_display_mode *mode)
9842
			    struct drm_display_mode *mode)
9820
{
9843
{
9821
	struct drm_device *dev = crtc->dev;
9844
	struct drm_device *dev = crtc->dev;
9822
	struct intel_encoder *encoder;
9845
	struct intel_encoder *encoder;
9823
	struct intel_crtc_config *pipe_config;
9846
	struct intel_crtc_config *pipe_config;
9824
	int plane_bpp, ret = -EINVAL;
9847
	int plane_bpp, ret = -EINVAL;
9825
	bool retry = true;
9848
	bool retry = true;
9826
 
9849
 
9827
	if (!check_encoder_cloning(to_intel_crtc(crtc))) {
9850
	if (!check_encoder_cloning(to_intel_crtc(crtc))) {
9828
		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
9851
		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
9829
		return ERR_PTR(-EINVAL);
9852
		return ERR_PTR(-EINVAL);
9830
	}
9853
	}
9831
 
9854
 
9832
	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
9855
	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
9833
	if (!pipe_config)
9856
	if (!pipe_config)
9834
		return ERR_PTR(-ENOMEM);
9857
		return ERR_PTR(-ENOMEM);
9835
 
9858
 
9836
	drm_mode_copy(&pipe_config->adjusted_mode, mode);
9859
	drm_mode_copy(&pipe_config->adjusted_mode, mode);
9837
	drm_mode_copy(&pipe_config->requested_mode, mode);
9860
	drm_mode_copy(&pipe_config->requested_mode, mode);
9838
 
9861
 
9839
	pipe_config->cpu_transcoder =
9862
	pipe_config->cpu_transcoder =
9840
		(enum transcoder) to_intel_crtc(crtc)->pipe;
9863
		(enum transcoder) to_intel_crtc(crtc)->pipe;
9841
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
9864
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
9842
 
9865
 
9843
	/*
9866
	/*
9844
	 * Sanitize sync polarity flags based on requested ones. If neither
9867
	 * Sanitize sync polarity flags based on requested ones. If neither
9845
	 * positive nor negative polarity is requested, treat this as meaning
9868
	 * positive nor negative polarity is requested, treat this as meaning
9846
	 * negative polarity.
9869
	 * negative polarity.
9847
	 */
9870
	 */
9848
	if (!(pipe_config->adjusted_mode.flags &
9871
	if (!(pipe_config->adjusted_mode.flags &
9849
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
9872
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
9850
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
9873
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
9851
 
9874
 
9852
	if (!(pipe_config->adjusted_mode.flags &
9875
	if (!(pipe_config->adjusted_mode.flags &
9853
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
9876
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
9854
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
9877
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
9855
 
9878
 
9856
	/* Compute a starting value for pipe_config->pipe_bpp taking the source
9879
	/* Compute a starting value for pipe_config->pipe_bpp taking the source
9857
	 * plane pixel format and any sink constraints into account. Returns the
9880
	 * plane pixel format and any sink constraints into account. Returns the
9858
	 * source plane bpp so that dithering can be selected on mismatches
9881
	 * source plane bpp so that dithering can be selected on mismatches
9859
	 * after encoders and crtc also have had their say. */
9882
	 * after encoders and crtc also have had their say. */
9860
	plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
9883
	plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
9861
					      fb, pipe_config);
9884
					      fb, pipe_config);
9862
	if (plane_bpp < 0)
9885
	if (plane_bpp < 0)
9863
		goto fail;
9886
		goto fail;
9864
 
9887
 
9865
	/*
9888
	/*
9866
	 * Determine the real pipe dimensions. Note that stereo modes can
9889
	 * Determine the real pipe dimensions. Note that stereo modes can
9867
	 * increase the actual pipe size due to the frame doubling and
9890
	 * increase the actual pipe size due to the frame doubling and
9868
	 * insertion of additional space for blanks between the frames. This
9891
	 * insertion of additional space for blanks between the frames. This
9869
	 * is stored in the crtc timings. We use the requested mode to do this
9892
	 * is stored in the crtc timings. We use the requested mode to do this
9870
	 * computation to clearly distinguish it from the adjusted mode, which
9893
	 * computation to clearly distinguish it from the adjusted mode, which
9871
	 * can be changed by the connectors in the below retry loop.
9894
	 * can be changed by the connectors in the below retry loop.
9872
	 */
9895
	 */
9873
	drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
9896
	drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
9874
	pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
9897
	pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
9875
	pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;
9898
	pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;
9876
 
9899
 
9877
encoder_retry:
9900
encoder_retry:
9878
	/* Ensure the port clock defaults are reset when retrying. */
9901
	/* Ensure the port clock defaults are reset when retrying. */
9879
	pipe_config->port_clock = 0;
9902
	pipe_config->port_clock = 0;
9880
	pipe_config->pixel_multiplier = 1;
9903
	pipe_config->pixel_multiplier = 1;
9881
 
9904
 
9882
	/* Fill in default crtc timings, allow encoders to overwrite them. */
9905
	/* Fill in default crtc timings, allow encoders to overwrite them. */
9883
	drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);
9906
	drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);
9884
 
9907
 
9885
	/* Pass our mode to the connectors and the CRTC to give them a chance to
9908
	/* Pass our mode to the connectors and the CRTC to give them a chance to
9886
	 * adjust it according to limitations or connector properties, and also
9909
	 * adjust it according to limitations or connector properties, and also
9887
	 * a chance to reject the mode entirely.
9910
	 * a chance to reject the mode entirely.
9888
	 */
9911
	 */
9889
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9912
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9890
			    base.head) {
9913
			    base.head) {
9891
 
9914
 
9892
		if (&encoder->new_crtc->base != crtc)
9915
		if (&encoder->new_crtc->base != crtc)
9893
			continue;
9916
			continue;
9894
 
9917
 
9895
		if (!(encoder->compute_config(encoder, pipe_config))) {
9918
		if (!(encoder->compute_config(encoder, pipe_config))) {
9896
			DRM_DEBUG_KMS("Encoder config failure\n");
9919
			DRM_DEBUG_KMS("Encoder config failure\n");
9897
			goto fail;
9920
			goto fail;
9898
		}
9921
		}
9899
	}
9922
	}
9900
 
9923
 
9901
	/* Set default port clock if not overwritten by the encoder. Needs to be
9924
	/* Set default port clock if not overwritten by the encoder. Needs to be
9902
	 * done afterwards in case the encoder adjusts the mode. */
9925
	 * done afterwards in case the encoder adjusts the mode. */
9903
	if (!pipe_config->port_clock)
9926
	if (!pipe_config->port_clock)
9904
		pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
9927
		pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
9905
			* pipe_config->pixel_multiplier;
9928
			* pipe_config->pixel_multiplier;
9906
 
9929
 
9907
	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
9930
	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
9908
	if (ret < 0) {
9931
	if (ret < 0) {
9909
		DRM_DEBUG_KMS("CRTC fixup failed\n");
9932
		DRM_DEBUG_KMS("CRTC fixup failed\n");
9910
		goto fail;
9933
		goto fail;
9911
	}
9934
	}
9912
 
9935
 
9913
	if (ret == RETRY) {
9936
	if (ret == RETRY) {
9914
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
9937
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
9915
			ret = -EINVAL;
9938
			ret = -EINVAL;
9916
			goto fail;
9939
			goto fail;
9917
		}
9940
		}
9918
 
9941
 
9919
		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
9942
		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
9920
		retry = false;
9943
		retry = false;
9921
		goto encoder_retry;
9944
		goto encoder_retry;
9922
	}
9945
	}
9923
 
9946
 
9924
	pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
9947
	pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
9925
	DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
9948
	DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
9926
		      plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);
9949
		      plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);
9927
 
9950
 
9928
	return pipe_config;
9951
	return pipe_config;
9929
fail:
9952
fail:
9930
	kfree(pipe_config);
9953
	kfree(pipe_config);
9931
	return ERR_PTR(ret);
9954
	return ERR_PTR(ret);
9932
}
9955
}
9933
 
9956
 
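/*
 * Illustrative note, not part of the original file: the encoder_retry label
 * above implements a bounded "compute, then retry at most once" loop.  Its
 * shape, with a hypothetical compute_cb() standing in for
 * intel_crtc_compute_config(), is:
 *
 *	bool retried = false;
 *again:
 *	port_clock = 0;			// reset encoder-adjustable defaults
 *	pixel_multiplier = 1;
 *	ret = compute_cb(...);
 *	if (ret == RETRY) {
 *		if (retried)
 *			return -EINVAL;	// refuse to loop forever
 *		retried = true;
 *		goto again;		// recompute with the adjusted mode
 *	}
 */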
9934
/* Computes which crtcs are affected and sets the relevant bits in the mask. For
9957
/* Computes which crtcs are affected and sets the relevant bits in the mask. For
9935
 * simplicity we use the crtc's pipe number (because it's easier to obtain). */
9958
 * simplicity we use the crtc's pipe number (because it's easier to obtain). */
9936
static void
9959
static void
9937
intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
9960
intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
9938
			     unsigned *prepare_pipes, unsigned *disable_pipes)
9961
			     unsigned *prepare_pipes, unsigned *disable_pipes)
9939
{
9962
{
9940
	struct intel_crtc *intel_crtc;
9963
	struct intel_crtc *intel_crtc;
9941
	struct drm_device *dev = crtc->dev;
9964
	struct drm_device *dev = crtc->dev;
9942
	struct intel_encoder *encoder;
9965
	struct intel_encoder *encoder;
9943
	struct intel_connector *connector;
9966
	struct intel_connector *connector;
9944
	struct drm_crtc *tmp_crtc;
9967
	struct drm_crtc *tmp_crtc;
9945
 
9968
 
9946
	*disable_pipes = *modeset_pipes = *prepare_pipes = 0;
9969
	*disable_pipes = *modeset_pipes = *prepare_pipes = 0;
9947
 
9970
 
9948
	/* Check which crtcs have changed outputs connected to them; these need
9971
	/* Check which crtcs have changed outputs connected to them; these need
9949
	 * to be part of the prepare_pipes mask. We don't (yet) support global
9972
	 * to be part of the prepare_pipes mask. We don't (yet) support global
9950
	 * modeset across multiple crtcs, so modeset_pipes will only have one
9973
	 * modeset across multiple crtcs, so modeset_pipes will only have one
9951
	 * bit set at most. */
9974
	 * bit set at most. */
9952
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9975
	list_for_each_entry(connector, &dev->mode_config.connector_list,
9953
			    base.head) {
9976
			    base.head) {
9954
		if (connector->base.encoder == &connector->new_encoder->base)
9977
		if (connector->base.encoder == &connector->new_encoder->base)
9955
			continue;
9978
			continue;
9956
 
9979
 
9957
		if (connector->base.encoder) {
9980
		if (connector->base.encoder) {
9958
			tmp_crtc = connector->base.encoder->crtc;
9981
			tmp_crtc = connector->base.encoder->crtc;
9959
 
9982
 
9960
			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
9983
			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
9961
		}
9984
		}
9962
 
9985
 
9963
		if (connector->new_encoder)
9986
		if (connector->new_encoder)
9964
			*prepare_pipes |=
9987
			*prepare_pipes |=
9965
				1 << connector->new_encoder->new_crtc->pipe;
9988
				1 << connector->new_encoder->new_crtc->pipe;
9966
	}
9989
	}
9967
 
9990
 
9968
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9991
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9969
			    base.head) {
9992
			    base.head) {
9970
		if (encoder->base.crtc == &encoder->new_crtc->base)
9993
		if (encoder->base.crtc == &encoder->new_crtc->base)
9971
			continue;
9994
			continue;
9972
 
9995
 
9973
		if (encoder->base.crtc) {
9996
		if (encoder->base.crtc) {
9974
			tmp_crtc = encoder->base.crtc;
9997
			tmp_crtc = encoder->base.crtc;
9975
 
9998
 
9976
			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
9999
			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
9977
		}
10000
		}
9978
 
10001
 
9979
		if (encoder->new_crtc)
10002
		if (encoder->new_crtc)
9980
			*prepare_pipes |= 1 << encoder->new_crtc->pipe;
10003
			*prepare_pipes |= 1 << encoder->new_crtc->pipe;
9981
	}
10004
	}
9982
 
10005
 
9983
	/* Check for pipes that will be enabled/disabled ... */
10006
	/* Check for pipes that will be enabled/disabled ... */
9984
	for_each_intel_crtc(dev, intel_crtc) {
10007
	for_each_intel_crtc(dev, intel_crtc) {
9985
		if (intel_crtc->base.enabled == intel_crtc->new_enabled)
10008
		if (intel_crtc->base.enabled == intel_crtc->new_enabled)
9986
			continue;
10009
			continue;
9987
 
10010
 
9988
		if (!intel_crtc->new_enabled)
10011
		if (!intel_crtc->new_enabled)
9989
			*disable_pipes |= 1 << intel_crtc->pipe;
10012
			*disable_pipes |= 1 << intel_crtc->pipe;
9990
		else
10013
		else
9991
			*prepare_pipes |= 1 << intel_crtc->pipe;
10014
			*prepare_pipes |= 1 << intel_crtc->pipe;
9992
	}
10015
	}
9993
 
10016
 
9994
 
10017
 
9995
	/* set_mode is also used to update properties on live display pipes. */
10018
	/* set_mode is also used to update properties on live display pipes. */
9996
	intel_crtc = to_intel_crtc(crtc);
10019
	intel_crtc = to_intel_crtc(crtc);
9997
	if (intel_crtc->new_enabled)
10020
	if (intel_crtc->new_enabled)
9998
		*prepare_pipes |= 1 << intel_crtc->pipe;
10021
		*prepare_pipes |= 1 << intel_crtc->pipe;
9999
 
10022
 
10000
	/*
10023
	/*
10001
	 * For simplicity do a full modeset on any pipe where the output routing
10024
	 * For simplicity do a full modeset on any pipe where the output routing
10002
	 * changed. We could be more clever, but that would require us to be
10025
	 * changed. We could be more clever, but that would require us to be
10003
	 * more careful with calling the relevant encoder->mode_set functions.
10026
	 * more careful with calling the relevant encoder->mode_set functions.
10004
	 */
10027
	 */
10005
	if (*prepare_pipes)
10028
	if (*prepare_pipes)
10006
		*modeset_pipes = *prepare_pipes;
10029
		*modeset_pipes = *prepare_pipes;
10007
 
10030
 
10008
	/* ... and mask these out. */
10031
	/* ... and mask these out. */
10009
	*modeset_pipes &= ~(*disable_pipes);
10032
	*modeset_pipes &= ~(*disable_pipes);
10010
	*prepare_pipes &= ~(*disable_pipes);
10033
	*prepare_pipes &= ~(*disable_pipes);
10011
 
10034
 
10012
	/*
10035
	/*
10013
	 * HACK: We don't (yet) fully support global modesets. intel_set_config
10036
	 * HACK: We don't (yet) fully support global modesets. intel_set_config
10014
	 * obeys this rule, but the modeset restore mode of
10037
	 * obeys this rule, but the modeset restore mode of
10015
	 * intel_modeset_setup_hw_state does not.
10038
	 * intel_modeset_setup_hw_state does not.
10016
	 */
10039
	 */
10017
	*modeset_pipes &= 1 << intel_crtc->pipe;
10040
	*modeset_pipes &= 1 << intel_crtc->pipe;
10018
	*prepare_pipes &= 1 << intel_crtc->pipe;
10041
	*prepare_pipes &= 1 << intel_crtc->pipe;
10019
 
10042
 
10020
	DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
10043
	DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
10021
		      *modeset_pipes, *prepare_pipes, *disable_pipes);
10044
		      *modeset_pipes, *prepare_pipes, *disable_pipes);
10022
}
10045
}
10023
 
10046
 
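/*
 * Illustrative sketch, not part of the original file: the three masks above
 * are plain per-pipe bitfields.  With the hypothetical values below, pipe A
 * is being turned off while pipe B gets a full modeset:
 */
static inline void example_pipe_masks(void)
{
	unsigned int prepare_pipes = (1 << 0) | (1 << 1);	/* pipes A and B */
	unsigned int disable_pipes = (1 << 0);			/* pipe A goes away */
	unsigned int modeset_pipes = prepare_pipes;

	modeset_pipes &= ~disable_pipes;	/* == (1 << 1): modeset pipe B only */
	prepare_pipes &= ~disable_pipes;	/* == (1 << 1) */
	(void)modeset_pipes;
	(void)prepare_pipes;
}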
10024
static bool intel_crtc_in_use(struct drm_crtc *crtc)
10047
static bool intel_crtc_in_use(struct drm_crtc *crtc)
10025
{
10048
{
10026
	struct drm_encoder *encoder;
10049
	struct drm_encoder *encoder;
10027
	struct drm_device *dev = crtc->dev;
10050
	struct drm_device *dev = crtc->dev;
10028
 
10051
 
10029
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
10052
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
10030
		if (encoder->crtc == crtc)
10053
		if (encoder->crtc == crtc)
10031
			return true;
10054
			return true;
10032
 
10055
 
10033
	return false;
10056
	return false;
10034
}
10057
}
10035
 
10058
 
10036
static void
10059
static void
10037
intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
10060
intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
10038
{
10061
{
10039
	struct intel_encoder *intel_encoder;
10062
	struct intel_encoder *intel_encoder;
10040
	struct intel_crtc *intel_crtc;
10063
	struct intel_crtc *intel_crtc;
10041
	struct drm_connector *connector;
10064
	struct drm_connector *connector;
10042
 
10065
 
10043
	list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
10066
	list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
10044
			    base.head) {
10067
			    base.head) {
10045
		if (!intel_encoder->base.crtc)
10068
		if (!intel_encoder->base.crtc)
10046
			continue;
10069
			continue;
10047
 
10070
 
10048
		intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
10071
		intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
10049
 
10072
 
10050
		if (prepare_pipes & (1 << intel_crtc->pipe))
10073
		if (prepare_pipes & (1 << intel_crtc->pipe))
10051
			intel_encoder->connectors_active = false;
10074
			intel_encoder->connectors_active = false;
10052
	}
10075
	}
10053
 
10076
 
10054
	intel_modeset_commit_output_state(dev);
10077
	intel_modeset_commit_output_state(dev);
10055
 
10078
 
10056
	/* Double check state. */
10079
	/* Double check state. */
10057
	for_each_intel_crtc(dev, intel_crtc) {
10080
	for_each_intel_crtc(dev, intel_crtc) {
10058
		WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
10081
		WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
10059
		WARN_ON(intel_crtc->new_config &&
10082
		WARN_ON(intel_crtc->new_config &&
10060
			intel_crtc->new_config != &intel_crtc->config);
10083
			intel_crtc->new_config != &intel_crtc->config);
10061
		WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config);
10084
		WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config);
10062
	}
10085
	}
10063
 
10086
 
10064
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
10087
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
10065
		if (!connector->encoder || !connector->encoder->crtc)
10088
		if (!connector->encoder || !connector->encoder->crtc)
10066
			continue;
10089
			continue;
10067
 
10090
 
10068
		intel_crtc = to_intel_crtc(connector->encoder->crtc);
10091
		intel_crtc = to_intel_crtc(connector->encoder->crtc);
10069
 
10092
 
10070
		if (prepare_pipes & (1 << intel_crtc->pipe)) {
10093
		if (prepare_pipes & (1 << intel_crtc->pipe)) {
10071
			struct drm_property *dpms_property =
10094
			struct drm_property *dpms_property =
10072
				dev->mode_config.dpms_property;
10095
				dev->mode_config.dpms_property;
10073
 
10096
 
10074
			connector->dpms = DRM_MODE_DPMS_ON;
10097
			connector->dpms = DRM_MODE_DPMS_ON;
10075
			drm_object_property_set_value(&connector->base,
10098
			drm_object_property_set_value(&connector->base,
10076
							 dpms_property,
10099
							 dpms_property,
10077
							 DRM_MODE_DPMS_ON);
10100
							 DRM_MODE_DPMS_ON);
10078
 
10101
 
10079
			intel_encoder = to_intel_encoder(connector->encoder);
10102
			intel_encoder = to_intel_encoder(connector->encoder);
10080
			intel_encoder->connectors_active = true;
10103
			intel_encoder->connectors_active = true;
10081
		}
10104
		}
10082
	}
10105
	}
10083
 
10106
 
10084
}
10107
}
10085
 
10108
 
10086
static bool intel_fuzzy_clock_check(int clock1, int clock2)
10109
static bool intel_fuzzy_clock_check(int clock1, int clock2)
10087
{
10110
{
10088
	int diff;
10111
	int diff;
10089
 
10112
 
10090
	if (clock1 == clock2)
10113
	if (clock1 == clock2)
10091
		return true;
10114
		return true;
10092
 
10115
 
10093
	if (!clock1 || !clock2)
10116
	if (!clock1 || !clock2)
10094
		return false;
10117
		return false;
10095
 
10118
 
10096
	diff = abs(clock1 - clock2);
10119
	diff = abs(clock1 - clock2);
10097
 
10120
 
10098
	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
10121
	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
10099
		return true;
10122
		return true;
10100
 
10123
 
10101
	return false;
10124
	return false;
10102
}
10125
}
10103
 
10126
 
10104
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
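/*
 * Illustrative note, not part of the original file: the check above accepts
 * clocks whose difference stays below 5% of clock1 + clock2 (roughly 10% of
 * their mean), since ((diff + c1 + c2) * 100) / (c1 + c2) < 105 rearranges
 * (up to integer division) to 100 * diff < 5 * (c1 + c2).  Worked examples:
 *
 *	intel_fuzzy_clock_check(100000, 108000);  // 103 < 105  -> true
 *	intel_fuzzy_clock_check(100000, 112000);  // 105 >= 105 -> false
 */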
10127
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
10105
	list_for_each_entry((intel_crtc), \
10128
	list_for_each_entry((intel_crtc), \
10106
			    &(dev)->mode_config.crtc_list, \
10129
			    &(dev)->mode_config.crtc_list, \
10107
			    base.head) \
10130
			    base.head) \
10108
		if (mask & (1 <<(intel_crtc)->pipe))
10131
		if (mask & (1 <<(intel_crtc)->pipe))
10109
 
10132
 
10110
static bool
10133
static bool
10111
intel_pipe_config_compare(struct drm_device *dev,
10134
intel_pipe_config_compare(struct drm_device *dev,
10112
			  struct intel_crtc_config *current_config,
10135
			  struct intel_crtc_config *current_config,
10113
			  struct intel_crtc_config *pipe_config)
10136
			  struct intel_crtc_config *pipe_config)
10114
{
10137
{
10115
#define PIPE_CONF_CHECK_X(name)	\
10138
#define PIPE_CONF_CHECK_X(name)	\
10116
	if (current_config->name != pipe_config->name) { \
10139
	if (current_config->name != pipe_config->name) { \
10117
		DRM_ERROR("mismatch in " #name " " \
10140
		DRM_ERROR("mismatch in " #name " " \
10118
			  "(expected 0x%08x, found 0x%08x)\n", \
10141
			  "(expected 0x%08x, found 0x%08x)\n", \
10119
			  current_config->name, \
10142
			  current_config->name, \
10120
			  pipe_config->name); \
10143
			  pipe_config->name); \
10121
		return false; \
10144
		return false; \
10122
	}
10145
	}
10123
 
10146
 
10124
#define PIPE_CONF_CHECK_I(name)	\
10147
#define PIPE_CONF_CHECK_I(name)	\
10125
	if (current_config->name != pipe_config->name) { \
10148
	if (current_config->name != pipe_config->name) { \
10126
		DRM_ERROR("mismatch in " #name " " \
10149
		DRM_ERROR("mismatch in " #name " " \
10127
			  "(expected %i, found %i)\n", \
10150
			  "(expected %i, found %i)\n", \
10128
			  current_config->name, \
10151
			  current_config->name, \
10129
			  pipe_config->name); \
10152
			  pipe_config->name); \
10130
		return false; \
10153
		return false; \
10131
	}
10154
	}
10132
 
10155
 
10133
#define PIPE_CONF_CHECK_FLAGS(name, mask)	\
10156
#define PIPE_CONF_CHECK_FLAGS(name, mask)	\
10134
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
10157
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
10135
		DRM_ERROR("mismatch in " #name "(" #mask ") "	   \
10158
		DRM_ERROR("mismatch in " #name "(" #mask ") "	   \
10136
			  "(expected %i, found %i)\n", \
10159
			  "(expected %i, found %i)\n", \
10137
			  current_config->name & (mask), \
10160
			  current_config->name & (mask), \
10138
			  pipe_config->name & (mask)); \
10161
			  pipe_config->name & (mask)); \
10139
		return false; \
10162
		return false; \
10140
	}
10163
	}
10141
 
10164
 
10142
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
10165
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
10143
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
10166
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
10144
		DRM_ERROR("mismatch in " #name " " \
10167
		DRM_ERROR("mismatch in " #name " " \
10145
			  "(expected %i, found %i)\n", \
10168
			  "(expected %i, found %i)\n", \
10146
			  current_config->name, \
10169
			  current_config->name, \
10147
			  pipe_config->name); \
10170
			  pipe_config->name); \
10148
		return false; \
10171
		return false; \
10149
	}
10172
	}
10150
 
10173
 
10151
#define PIPE_CONF_QUIRK(quirk)	\
10174
#define PIPE_CONF_QUIRK(quirk)	\
10152
	((current_config->quirks | pipe_config->quirks) & (quirk))
10175
	((current_config->quirks | pipe_config->quirks) & (quirk))
10153
 
10176
 
10154
	PIPE_CONF_CHECK_I(cpu_transcoder);
10177
	PIPE_CONF_CHECK_I(cpu_transcoder);
10155
 
10178
 
10156
	PIPE_CONF_CHECK_I(has_pch_encoder);
10179
	PIPE_CONF_CHECK_I(has_pch_encoder);
10157
	PIPE_CONF_CHECK_I(fdi_lanes);
10180
	PIPE_CONF_CHECK_I(fdi_lanes);
10158
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
10181
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
10159
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
10182
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
10160
	PIPE_CONF_CHECK_I(fdi_m_n.link_m);
10183
	PIPE_CONF_CHECK_I(fdi_m_n.link_m);
10161
	PIPE_CONF_CHECK_I(fdi_m_n.link_n);
10184
	PIPE_CONF_CHECK_I(fdi_m_n.link_n);
10162
	PIPE_CONF_CHECK_I(fdi_m_n.tu);
10185
	PIPE_CONF_CHECK_I(fdi_m_n.tu);
10163
 
10186
 
10164
	PIPE_CONF_CHECK_I(has_dp_encoder);
10187
	PIPE_CONF_CHECK_I(has_dp_encoder);
10165
	PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
10188
	PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
10166
	PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
10189
	PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
10167
	PIPE_CONF_CHECK_I(dp_m_n.link_m);
10190
	PIPE_CONF_CHECK_I(dp_m_n.link_m);
10168
	PIPE_CONF_CHECK_I(dp_m_n.link_n);
10191
	PIPE_CONF_CHECK_I(dp_m_n.link_n);
10169
	PIPE_CONF_CHECK_I(dp_m_n.tu);
10192
	PIPE_CONF_CHECK_I(dp_m_n.tu);
10170
 
10193
 
10171
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
10194
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
10172
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
10195
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
10173
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
10196
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
10174
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_end);
10197
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_end);
10175
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_start);
10198
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_start);
10176
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_end);
10199
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_end);
10177
 
10200
 
10178
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vdisplay);
10201
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vdisplay);
10179
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vtotal);
10202
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vtotal);
10180
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_start);
10203
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_start);
10181
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_end);
10204
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_end);
10182
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
10205
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
10183
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
10206
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
10184
 
10207
 
10185
	PIPE_CONF_CHECK_I(pixel_multiplier);
10208
	PIPE_CONF_CHECK_I(pixel_multiplier);
10186
	PIPE_CONF_CHECK_I(has_hdmi_sink);
10209
	PIPE_CONF_CHECK_I(has_hdmi_sink);
10187
	if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
10210
	if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
10188
	    IS_VALLEYVIEW(dev))
10211
	    IS_VALLEYVIEW(dev))
10189
		PIPE_CONF_CHECK_I(limited_color_range);
10212
		PIPE_CONF_CHECK_I(limited_color_range);
10190
 
10213
 
10191
	PIPE_CONF_CHECK_I(has_audio);
10214
	PIPE_CONF_CHECK_I(has_audio);
10192
 
10215
 
10193
	PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10216
	PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10194
			      DRM_MODE_FLAG_INTERLACE);
10217
			      DRM_MODE_FLAG_INTERLACE);
10195
 
10218
 
10196
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
10219
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
10197
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10220
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10198
				      DRM_MODE_FLAG_PHSYNC);
10221
				      DRM_MODE_FLAG_PHSYNC);
10199
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10222
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10200
				      DRM_MODE_FLAG_NHSYNC);
10223
				      DRM_MODE_FLAG_NHSYNC);
10201
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10224
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10202
				      DRM_MODE_FLAG_PVSYNC);
10225
				      DRM_MODE_FLAG_PVSYNC);
10203
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10226
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10204
				      DRM_MODE_FLAG_NVSYNC);
10227
				      DRM_MODE_FLAG_NVSYNC);
10205
	}
10228
	}
10206
 
10229
 
10207
	PIPE_CONF_CHECK_I(pipe_src_w);
10230
	PIPE_CONF_CHECK_I(pipe_src_w);
10208
	PIPE_CONF_CHECK_I(pipe_src_h);
10231
	PIPE_CONF_CHECK_I(pipe_src_h);
10209
 
10232
 
10210
	/*
10233
	/*
10211
	 * FIXME: BIOS likes to set up a cloned config with lvds+external
10234
	 * FIXME: BIOS likes to set up a cloned config with lvds+external
10212
	 * screen. Since we don't yet re-compute the pipe config when moving
10235
	 * screen. Since we don't yet re-compute the pipe config when moving
10213
	 * just the lvds port away to another pipe, the sw tracking won't match.
10236
	 * just the lvds port away to another pipe, the sw tracking won't match.
10214
	 *
10237
	 *
10215
	 * Proper atomic modesets with recomputed global state will fix this.
10238
	 * Proper atomic modesets with recomputed global state will fix this.
10216
	 * Until then just don't check gmch state for inherited modes.
10239
	 * Until then just don't check gmch state for inherited modes.
10217
	 */
10240
	 */
10218
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
10241
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
10219
		PIPE_CONF_CHECK_I(gmch_pfit.control);
10242
		PIPE_CONF_CHECK_I(gmch_pfit.control);
10220
		/* pfit ratios are autocomputed by the hw on gen4+ */
10243
		/* pfit ratios are autocomputed by the hw on gen4+ */
10221
		if (INTEL_INFO(dev)->gen < 4)
10244
		if (INTEL_INFO(dev)->gen < 4)
10222
			PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
10245
			PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
10223
		PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
10246
		PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
10224
	}
10247
	}
10225
 
10248
 
10226
	PIPE_CONF_CHECK_I(pch_pfit.enabled);
10249
	PIPE_CONF_CHECK_I(pch_pfit.enabled);
10227
	if (current_config->pch_pfit.enabled) {
10250
	if (current_config->pch_pfit.enabled) {
10228
		PIPE_CONF_CHECK_I(pch_pfit.pos);
10251
		PIPE_CONF_CHECK_I(pch_pfit.pos);
10229
		PIPE_CONF_CHECK_I(pch_pfit.size);
10252
		PIPE_CONF_CHECK_I(pch_pfit.size);
10230
	}
10253
	}
10231
 
10254
 
10232
	/* BDW+ don't expose a synchronous way to read the state */
10255
	/* BDW+ don't expose a synchronous way to read the state */
10233
	if (IS_HASWELL(dev))
10256
	if (IS_HASWELL(dev))
10234
		PIPE_CONF_CHECK_I(ips_enabled);
10257
		PIPE_CONF_CHECK_I(ips_enabled);
10235
 
10258
 
10236
	PIPE_CONF_CHECK_I(double_wide);
10259
	PIPE_CONF_CHECK_I(double_wide);
10237
 
10260
 
10238
	PIPE_CONF_CHECK_X(ddi_pll_sel);
10261
	PIPE_CONF_CHECK_X(ddi_pll_sel);
10239
 
10262
 
10240
	PIPE_CONF_CHECK_I(shared_dpll);
10263
	PIPE_CONF_CHECK_I(shared_dpll);
10241
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
10264
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
10242
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
10265
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
10243
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
10266
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
10244
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
10267
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
10245
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
10268
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
10246
 
10269
 
10247
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
10270
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
10248
		PIPE_CONF_CHECK_I(pipe_bpp);
10271
		PIPE_CONF_CHECK_I(pipe_bpp);
10249
 
10272
 
10250
		PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
10273
		PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
10251
		PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
10274
		PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
10252
 
10275
 
10253
#undef PIPE_CONF_CHECK_X
10276
#undef PIPE_CONF_CHECK_X
10254
#undef PIPE_CONF_CHECK_I
10277
#undef PIPE_CONF_CHECK_I
10255
#undef PIPE_CONF_CHECK_FLAGS
10278
#undef PIPE_CONF_CHECK_FLAGS
10256
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
10279
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
10257
#undef PIPE_CONF_QUIRK
10280
#undef PIPE_CONF_QUIRK
10258
 
10281
 
10259
	return true;
10282
	return true;
10260
}
10283
}
10261
 
10284
 
10262
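/* Cross-check the software connector state against the hardware: walk the
 * connector list, run intel_connector_check_state() (which uses the
 * ->get_hw_state callbacks) and warn if a staged encoder differs from the
 * currently bound one. */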
static void
check_connector_state(struct drm_device *dev)
{
	struct intel_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		/* This also checks the encoder/connector hw state with the
		 * ->get_hw_state callbacks. */
		intel_connector_check_state(connector);

		WARN(&connector->new_encoder->base != connector->base.encoder,
		     "connector's staged encoder doesn't match current encoder\n");
	}
}

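/* Verify encoder bookkeeping: for every encoder, recompute "enabled" and
 * "active" from the connector list and compare them against
 * encoder->base.crtc, encoder->connectors_active and the pipe reported by
 * the encoder's ->get_hw_state() hook. */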
static void
check_encoder_state(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		bool enabled = false;
		bool active = false;
		enum pipe pipe, tracked_pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		WARN(&encoder->new_crtc->base != encoder->base.crtc,
		     "encoder's stage crtc doesn't match current crtc\n");
		WARN(encoder->connectors_active && !encoder->base.crtc,
		     "encoder's active_connectors set, but no crtc\n");

		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    base.head) {
			if (connector->base.encoder != &encoder->base)
				continue;
			enabled = true;
			if (connector->base.dpms != DRM_MODE_DPMS_OFF)
				active = true;
		}
		/*
		 * for MST connectors if we unplug, the connector goes away
		 * but the encoder is still connected to a crtc
		 * until a modeset happens in response to the hotplug.
		 */
		if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST)
			continue;

		WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);
		WARN(active && !encoder->base.crtc,
		     "active encoder with no crtc\n");

		WARN(encoder->connectors_active != active,
		     "encoder's computed active state doesn't match tracked active state "
		     "(expected %i, found %i)\n", active, encoder->connectors_active);

		active = encoder->get_hw_state(encoder, &pipe);
		WARN(active != encoder->connectors_active,
		     "encoder's hw state doesn't match sw tracking "
		     "(expected %i, found %i)\n",
		     encoder->connectors_active, active);

		if (!encoder->base.crtc)
			continue;

		tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
		WARN(active && pipe != tracked_pipe,
		     "active encoder's pipe doesn't match"
		     "(expected %i, found %i)\n",
		     tracked_pipe, pipe);

	}
}

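/* Compare each crtc's software state (crtc->active, crtc->base.enabled and
 * crtc->config) with the hardware state read back through
 * ->get_pipe_config() and the encoders' ->get_config() hooks, dumping both
 * configurations when they disagree. */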
static void
check_crtc_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_crtc_config pipe_config;

	for_each_intel_crtc(dev, crtc) {
		bool enabled = false;
		bool active = false;

		memset(&pipe_config, 0, sizeof(pipe_config));

		DRM_DEBUG_KMS("[CRTC:%d]\n",
			      crtc->base.base.id);

		WARN(crtc->active && !crtc->base.enabled,
		     "active crtc, but not enabled in sw tracking\n");

		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
				    base.head) {
			if (encoder->base.crtc != &crtc->base)
				continue;
			enabled = true;
			if (encoder->connectors_active)
				active = true;
		}

		WARN(active != crtc->active,
		     "crtc's computed active state doesn't match tracked active state "
		     "(expected %i, found %i)\n", active, crtc->active);
		WARN(enabled != crtc->base.enabled,
		     "crtc's computed enabled state doesn't match tracked enabled state "
		     "(expected %i, found %i)\n", enabled, crtc->base.enabled);

		active = dev_priv->display.get_pipe_config(crtc,
							   &pipe_config);

		/* hw state is inconsistent with the pipe A quirk */
		if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
			active = crtc->active;

		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
				    base.head) {
			enum pipe pipe;
			if (encoder->base.crtc != &crtc->base)
				continue;
			if (encoder->get_hw_state(encoder, &pipe))
				encoder->get_config(encoder, &pipe_config);
		}

		WARN(crtc->active != active,
		     "crtc active state doesn't match with hw state "
		     "(expected %i, found %i)\n", crtc->active, active);

		if (active &&
		    !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) {
			WARN(1, "pipe state doesn't match!\n");
			intel_dump_pipe_config(crtc, &pipe_config,
					       "[hw state]");
			intel_dump_pipe_config(crtc, &crtc->config,
					       "[sw state]");
		}
	}
}

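/* Check shared DPLL refcounting: the number of enabled/active crtcs using a
 * pll must match pll->refcount/pll->active, and the cached hw_state must
 * match what ->get_hw_state() reads back from the hardware. */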
static void
check_shared_dpll_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	struct intel_dpll_hw_state dpll_hw_state;
	int i;

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
		int enabled_crtcs = 0, active_crtcs = 0;
		bool active;

		memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

		DRM_DEBUG_KMS("%s\n", pll->name);

		active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);

		WARN(pll->active > pll->refcount,
		     "more active pll users than references: %i vs %i\n",
		     pll->active, pll->refcount);
		WARN(pll->active && !pll->on,
		     "pll in active use but not on in sw tracking\n");
		WARN(pll->on && !pll->active,
		     "pll in on but not on in use in sw tracking\n");
		WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);

		for_each_intel_crtc(dev, crtc) {
			if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
				enabled_crtcs++;
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
				active_crtcs++;
		}
		WARN(pll->active != active_crtcs,
		     "pll active crtcs mismatch (expected %i, found %i)\n",
		     pll->active, active_crtcs);
		WARN(pll->refcount != enabled_crtcs,
		     "pll enabled crtcs mismatch (expected %i, found %i)\n",
		     pll->refcount, enabled_crtcs);

		WARN(pll->on && memcmp(&pll->hw_state, &dpll_hw_state,
				       sizeof(dpll_hw_state)),
		     "pll hw state mismatch\n");
	}
}

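/* Top-level modeset state checker, run after a successful modeset (see
 * intel_set_mode() below). */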
void
intel_modeset_check_state(struct drm_device *dev)
{
	check_connector_state(dev);
	check_encoder_state(dev);
	check_crtc_state(dev);
	check_shared_dpll_state(dev);
}

void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
				     int dotclock)
{
	/*
	 * FDI already provided one idea for the dotclock.
	 * Yell if the encoder disagrees.
	 */
	WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock),
	     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
	     pipe_config->adjusted_mode.crtc_clock, dotclock);
}

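/* Derive the per-crtc scanline counter offset explained in the comment
 * below: e.g. on gen2 with a progressive mode of crtc_vtotal == 525 the
 * offset becomes 524, on HSW+ HDMI it is 2, everywhere else 1. */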
static void update_scanline_offset(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 */
	if (IS_GEN2(dev)) {
		const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
		int vtotal;

		vtotal = mode->crtc_vtotal;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev) &&
		   intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else
		crtc->scanline_offset = 1;
}

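/* Core (pre-atomic) modeset path: compute the new pipe config, disable the
 * affected pipes, update global resources, pin the new framebuffer, call
 * ->crtc_mode_set() for pipes that need a full modeset and finally re-enable
 * everything in prepare_pipes. */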
static int __intel_set_mode(struct drm_crtc *crtc,
		    struct drm_display_mode *mode,
		    int x, int y, struct drm_framebuffer *fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *saved_mode;
	struct intel_crtc_config *pipe_config = NULL;
	struct intel_crtc *intel_crtc;
	unsigned disable_pipes, prepare_pipes, modeset_pipes;
	int ret = 0;

	saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL);
	if (!saved_mode)
		return -ENOMEM;

	intel_modeset_affected_pipes(crtc, &modeset_pipes,
				     &prepare_pipes, &disable_pipes);

	*saved_mode = crtc->mode;

	/* Hack: Because we don't (yet) support global modeset on multiple
	 * crtcs, we don't keep track of the new mode for more than one crtc.
	 * Hence simply check whether any bit is set in modeset_pipes in all the
	 * pieces of code that are not yet converted to deal with multiple crtcs
	 * changing their mode at the same time. */
	if (modeset_pipes) {
		pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
		if (IS_ERR(pipe_config)) {
			ret = PTR_ERR(pipe_config);
			pipe_config = NULL;

			goto out;
		}
		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
				       "[modeset]");
		to_intel_crtc(crtc)->new_config = pipe_config;
	}

	/*
	 * See if the config requires any additional preparation, e.g.
	 * to adjust global state with pipes off.  We need to do this
	 * here so we can get the modeset_pipe updated config for the new
	 * mode set on this crtc.  For other crtcs we need to use the
	 * adjusted_mode bits in the crtc directly.
	 */
	if (IS_VALLEYVIEW(dev)) {
		valleyview_modeset_global_pipes(dev, &prepare_pipes);

		/* may have added more to prepare_pipes than we should */
		prepare_pipes &= ~disable_pipes;
	}

	for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
		intel_crtc_disable(&intel_crtc->base);

	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
		if (intel_crtc->base.enabled)
			dev_priv->display.crtc_disable(&intel_crtc->base);
	}

	/* crtc->mode is already used by the ->mode_set callbacks, hence we need
	 * to set it here already despite that we pass it down the callchain.
	 */
	if (modeset_pipes) {
		crtc->mode = *mode;
		/* mode_set/enable/disable functions rely on a correct pipe
		 * config. */
		to_intel_crtc(crtc)->config = *pipe_config;
		to_intel_crtc(crtc)->new_config = &to_intel_crtc(crtc)->config;

		/*
		 * Calculate and store various constants which
		 * are later needed by vblank and swap-completion
		 * timestamping. They are derived from true hwmode.
		 */
		drm_calc_timestamping_constants(crtc,
						&pipe_config->adjusted_mode);
	}

	/* Only after disabling all output pipelines that will be changed can we
	 * update the output configuration. */
	intel_modeset_update_state(dev, prepare_pipes);

	if (dev_priv->display.modeset_global_resources)
		dev_priv->display.modeset_global_resources(dev);

	/* Set up the DPLL and any encoders state that needs to adjust or depend
	 * on the DPLL.
	 */
	for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
		struct drm_framebuffer *old_fb = crtc->primary->fb;
		struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
		struct drm_i915_gem_object *obj = intel_fb_obj(fb);

		mutex_lock(&dev->struct_mutex);
		ret = intel_pin_and_fence_fb_obj(dev,
						 obj,
						 NULL);
		if (ret != 0) {
			DRM_ERROR("pin & fence failed\n");
			mutex_unlock(&dev->struct_mutex);
			goto done;
		}
		if (old_fb)
			intel_unpin_fb_obj(old_obj);
		i915_gem_track_fb(old_obj, obj,
				  INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
		mutex_unlock(&dev->struct_mutex);

		crtc->primary->fb = fb;
		crtc->x = x;
		crtc->y = y;

		ret = dev_priv->display.crtc_mode_set(&intel_crtc->base,
					   x, y, fb);
		if (ret)
		    goto done;
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
		update_scanline_offset(intel_crtc);

		dev_priv->display.crtc_enable(&intel_crtc->base);
	}

	/* FIXME: add subpixel order */
done:
	if (ret && crtc->enabled)
		crtc->mode = *saved_mode;

out:
	kfree(pipe_config);
	kfree(saved_mode);
	return ret;
}

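/* Wrapper around __intel_set_mode() that runs the modeset state checker on
 * success. */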
static int intel_set_mode(struct drm_crtc *crtc,
		     struct drm_display_mode *mode,
		     int x, int y, struct drm_framebuffer *fb)
{
	int ret;

	ret = __intel_set_mode(crtc, mode, x, y, fb);

	if (ret == 0)
		intel_modeset_check_state(crtc->dev);

	return ret;
}

void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
	intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb);
}

#undef for_each_intel_crtc_masked

static void intel_set_config_free(struct intel_set_config *config)
{
	if (!config)
		return;

	kfree(config->save_connector_encoders);
	kfree(config->save_encoder_crtcs);
	kfree(config->save_crtc_enabled);
	kfree(config);
}

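/* Save the current crtc-enabled flags, encoder->crtc and connector->encoder
 * links so that a failed set_config can be rolled back. */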
static int intel_set_config_save_state(struct drm_device *dev,
				       struct intel_set_config *config)
{
	struct drm_crtc *crtc;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	int count;

	config->save_crtc_enabled =
		kcalloc(dev->mode_config.num_crtc,
			sizeof(bool), GFP_KERNEL);
	if (!config->save_crtc_enabled)
		return -ENOMEM;

	config->save_encoder_crtcs =
		kcalloc(dev->mode_config.num_encoder,
			sizeof(struct drm_crtc *), GFP_KERNEL);
	if (!config->save_encoder_crtcs)
		return -ENOMEM;

	config->save_connector_encoders =
		kcalloc(dev->mode_config.num_connector,
			sizeof(struct drm_encoder *), GFP_KERNEL);
	if (!config->save_connector_encoders)
		return -ENOMEM;

	/* Copy data. Note that driver private data is not affected.
	 * Should anything bad happen only the expected state is
	 * restored, not the driver's personal bookkeeping.
	 */
	count = 0;
	for_each_crtc(dev, crtc) {
		config->save_crtc_enabled[count++] = crtc->enabled;
	}

	count = 0;
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		config->save_encoder_crtcs[count++] = encoder->crtc;
	}

	count = 0;
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		config->save_connector_encoders[count++] = connector->encoder;
	}

	return 0;
}

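/* Undo the staging done for a failed set_config by restoring the saved
 * crtc/encoder/connector links into the new_* fields. */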
static void intel_set_config_restore_state(struct drm_device *dev,
					   struct intel_set_config *config)
{
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	int count;

	count = 0;
	for_each_intel_crtc(dev, crtc) {
		crtc->new_enabled = config->save_crtc_enabled[count++];

		if (crtc->new_enabled)
			crtc->new_config = &crtc->config;
		else
			crtc->new_config = NULL;
	}

	count = 0;
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		encoder->new_crtc =
			to_intel_crtc(config->save_encoder_crtcs[count++]);
	}

	count = 0;
	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
		connector->new_encoder =
			to_intel_encoder(config->save_connector_encoders[count++]);
	}
}

static bool
is_crtc_connector_off(struct drm_mode_set *set)
{
	int i;

	if (set->num_connectors == 0)
		return false;

	if (WARN_ON(set->connectors == NULL))
		return false;

	for (i = 0; i < set->num_connectors; i++)
		if (set->connectors[i]->encoder &&
		    set->connectors[i]->encoder->crtc == set->crtc &&
		    set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
			return true;

	return false;
}

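/* Classify a set_config request as either a full modeset (mode_changed) or
 * a plain framebuffer/base update (fb_changed). */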
static void
intel_set_config_compute_mode_changes(struct drm_mode_set *set,
				      struct intel_set_config *config)
{

	/* We should be able to check here if the fb has the same properties
	 * and then just flip_or_move it */
	if (is_crtc_connector_off(set)) {
			config->mode_changed = true;
	} else if (set->crtc->primary->fb != set->fb) {
		/*
		 * If we have no fb, we can only flip as long as the crtc is
		 * active, otherwise we need a full mode set.  The crtc may
		 * be active if we've only disabled the primary plane, or
		 * in fastboot situations.
		 */
		if (set->crtc->primary->fb == NULL) {
			struct intel_crtc *intel_crtc =
				to_intel_crtc(set->crtc);

			if (intel_crtc->active) {
				DRM_DEBUG_KMS("crtc has no fb, will flip\n");
				config->fb_changed = true;
			} else {
				DRM_DEBUG_KMS("inactive crtc, full mode set\n");
			config->mode_changed = true;
			}
		} else if (set->fb == NULL) {
			config->mode_changed = true;
		} else if (set->fb->pixel_format !=
			   set->crtc->primary->fb->pixel_format) {
			config->mode_changed = true;
		} else {
			config->fb_changed = true;
	}
	}

	if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
		config->fb_changed = true;

	if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
		DRM_DEBUG_KMS("modes are different, full mode set\n");
		drm_mode_debug_printmodeline(&set->crtc->mode);
		drm_mode_debug_printmodeline(set->mode);
		config->mode_changed = true;
	}

	DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
			set->crtc->base.id, config->mode_changed, config->fb_changed);
}

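/* Stage the requested output routing in the connector->new_encoder,
 * encoder->new_crtc and crtc->new_enabled fields, flagging a full modeset
 * whenever the routing actually changes. */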
static int
intel_modeset_stage_output_state(struct drm_device *dev,
				 struct drm_mode_set *set,
				 struct intel_set_config *config)
{
	struct intel_connector *connector;
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	int ro;

	/* The upper layers ensure that we either disable a crtc or have a list
	 * of connectors. For paranoia, double-check this. */
	WARN_ON(!set->fb && (set->num_connectors != 0));
	WARN_ON(set->fb && (set->num_connectors == 0));

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		/* Otherwise traverse passed in connector list and get encoders
		 * for them. */
		for (ro = 0; ro < set->num_connectors; ro++) {
			if (set->connectors[ro] == &connector->base) {
				connector->new_encoder = intel_find_encoder(connector, to_intel_crtc(set->crtc)->pipe);
				break;
			}
		}

		/* If we disable the crtc, disable all its connectors. Also, if
		 * the connector is on the changing crtc but not on the new
		 * connector list, disable it. */
		if ((!set->fb || ro == set->num_connectors) &&
		    connector->base.encoder &&
		    connector->base.encoder->crtc == set->crtc) {
			connector->new_encoder = NULL;

			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
				connector->base.base.id,
				connector->base.name);
		}


		if (&connector->new_encoder->base != connector->base.encoder) {
			DRM_DEBUG_KMS("encoder changed, full mode switch\n");
			config->mode_changed = true;
		}
	}
	/* connector->new_encoder is now updated for all connectors. */

	/* Update crtc of enabled connectors. */
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		struct drm_crtc *new_crtc;

		if (!connector->new_encoder)
			continue;

		new_crtc = connector->new_encoder->base.crtc;

		for (ro = 0; ro < set->num_connectors; ro++) {
			if (set->connectors[ro] == &connector->base)
				new_crtc = set->crtc;
		}

		/* Make sure the new CRTC will work with the encoder */
		if (!drm_encoder_crtc_ok(&connector->new_encoder->base,
					   new_crtc)) {
			return -EINVAL;
		}
		connector->new_encoder->new_crtc = to_intel_crtc(new_crtc);

		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
			connector->base.base.id,
			connector->base.name,
			new_crtc->base.id);
	}

	/* Check for any encoders that need to be disabled. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		int num_connectors = 0;
		list_for_each_entry(connector,
				    &dev->mode_config.connector_list,
				    base.head) {
			if (connector->new_encoder == encoder) {
				WARN_ON(!connector->new_encoder->new_crtc);
				num_connectors++;
			}
		}

		if (num_connectors == 0)
		encoder->new_crtc = NULL;
		else if (num_connectors > 1)
			return -EINVAL;

		/* Only now check for crtc changes so we don't miss encoders
		 * that will be disabled. */
		if (&encoder->new_crtc->base != encoder->base.crtc) {
			DRM_DEBUG_KMS("crtc changed, full mode switch\n");
			config->mode_changed = true;
		}
	}
	/* Now we've also updated encoder->new_crtc for all encoders. */
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		if (connector->new_encoder)
			if (connector->new_encoder != connector->encoder)
				connector->encoder = connector->new_encoder;
	}
	for_each_intel_crtc(dev, crtc) {
		crtc->new_enabled = false;

		list_for_each_entry(encoder,
				    &dev->mode_config.encoder_list,
				    base.head) {
			if (encoder->new_crtc == crtc) {
				crtc->new_enabled = true;
				break;
			}
		}

		if (crtc->new_enabled != crtc->base.enabled) {
			DRM_DEBUG_KMS("crtc %sabled, full mode switch\n",
				      crtc->new_enabled ? "en" : "dis");
			config->mode_changed = true;
		}

		if (crtc->new_enabled)
			crtc->new_config = &crtc->config;
		else
			crtc->new_config = NULL;
	}

	return 0;
}

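/* Drop all staged links pointing at a crtc that is being restored without a
 * framebuffer, so the pipe ends up disabled instead of oopsing on fb==NULL. */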
static void disable_crtc_nofb(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n",
		      pipe_name(crtc->pipe));

	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
		if (connector->new_encoder &&
		    connector->new_encoder->new_crtc == crtc)
			connector->new_encoder = NULL;
	}

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		if (encoder->new_crtc == crtc)
			encoder->new_crtc = NULL;
	}

	crtc->new_enabled = false;
	crtc->new_config = NULL;
}

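/* drm_crtc_funcs::set_config implementation: save the current state, compute
 * what changed and either perform a full intel_set_mode() or just an
 * intel_pipe_set_base(), restoring the saved state on failure. */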
static int intel_crtc_set_config(struct drm_mode_set *set)
{
	struct drm_device *dev;
	struct drm_mode_set save_set;
	struct intel_set_config *config;
	int ret;

	BUG_ON(!set);
	BUG_ON(!set->crtc);
	BUG_ON(!set->crtc->helper_private);

	/* Enforce sane interface api - has been abused by the fb helper. */
	BUG_ON(!set->mode && set->fb);
	BUG_ON(set->fb && set->num_connectors == 0);

	if (set->fb) {
		DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
				set->crtc->base.id, set->fb->base.id,
				(int)set->num_connectors, set->x, set->y);
	} else {
		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
	}

	dev = set->crtc->dev;

	ret = -ENOMEM;
	config = kzalloc(sizeof(*config), GFP_KERNEL);
	if (!config)
		goto out_config;

	ret = intel_set_config_save_state(dev, config);
	if (ret)
		goto out_config;

	save_set.crtc = set->crtc;
	save_set.mode = &set->crtc->mode;
	save_set.x = set->crtc->x;
	save_set.y = set->crtc->y;
	save_set.fb = set->crtc->primary->fb;

	/* Compute whether we need a full modeset, only an fb base update or no
	 * change at all. In the future we might also check whether only the
	 * mode changed, e.g. for LVDS where we only change the panel fitter in
	 * such cases. */
	intel_set_config_compute_mode_changes(set, config);

	ret = intel_modeset_stage_output_state(dev, set, config);
	if (ret)
		goto fail;

	if (config->mode_changed) {
		ret = intel_set_mode(set->crtc, set->mode,
				     set->x, set->y, set->fb);
	} else if (config->fb_changed) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);


		ret = intel_pipe_set_base(set->crtc,
					  set->x, set->y, set->fb);

		/*
		 * We need to make sure the primary plane is re-enabled if it
		 * has previously been turned off.
		 */
		if (!intel_crtc->primary_enabled && ret == 0) {
			WARN_ON(!intel_crtc->active);
			intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
						      intel_crtc->pipe);
		}

		/*
		 * In the fastboot case this may be our only check of the
		 * state after boot.  It would be better to only do it on
		 * the first update, but we don't have a nice way of doing that
		 * (and really, set_config isn't used much for high freq page
		 * flipping, so increasing its cost here shouldn't be a big
		 * deal).
		 */
		if (i915.fastboot && ret == 0)
			intel_modeset_check_state(set->crtc->dev);
	}

	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
			  set->crtc->base.id, ret);
fail:
	intel_set_config_restore_state(dev, config);

		/*
		 * HACK: if the pipe was on, but we didn't have a framebuffer,
		 * force the pipe off to avoid oopsing in the modeset code
		 * due to fb==NULL. This should only happen during boot since
		 * we don't yet reconstruct the FB from the hardware state.
		 */
		if (to_intel_crtc(save_set.crtc)->new_enabled && !save_set.fb)
11093
			disable_crtc_nofb(to_intel_crtc(save_set.crtc));
11116
			disable_crtc_nofb(to_intel_crtc(save_set.crtc));
11094
 
11117
 
11095
	/* Try to restore the config */
11118
	/* Try to restore the config */
11096
	if (config->mode_changed &&
11119
	if (config->mode_changed &&
11097
	    intel_set_mode(save_set.crtc, save_set.mode,
11120
	    intel_set_mode(save_set.crtc, save_set.mode,
11098
			    save_set.x, save_set.y, save_set.fb))
11121
			    save_set.x, save_set.y, save_set.fb))
11099
		DRM_ERROR("failed to restore config after modeset failure\n");
11122
		DRM_ERROR("failed to restore config after modeset failure\n");
11100
	}
11123
	}
11101
 
11124
 
11102
out_config:
11125
out_config:
11103
	intel_set_config_free(config);
11126
	intel_set_config_free(config);
11104
	return ret;
11127
	return ret;
11105
}
11128
}
11106
 
11129
 
11107
static const struct drm_crtc_funcs intel_crtc_funcs = {
11130
static const struct drm_crtc_funcs intel_crtc_funcs = {
11108
	.gamma_set = intel_crtc_gamma_set,
11131
	.gamma_set = intel_crtc_gamma_set,
11109
	.set_config = intel_crtc_set_config,
11132
	.set_config = intel_crtc_set_config,
11110
	.destroy = intel_crtc_destroy,
11133
	.destroy = intel_crtc_destroy,
11111
//	.page_flip = intel_crtc_page_flip,
11134
//	.page_flip = intel_crtc_page_flip,
11112
};
11135
};
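
/*
 * PCH (IBX/CPT) shared DPLL backend: get_hw_state reads DPLL, FP0 and FP1
 * back from the hardware and reports whether the VCO is enabled, so the
 * cached pll->hw_state can be checked against what is actually programmed;
 * mode_set/enable/disable below write those same registers.
 */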
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	uint32_t val;

	if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
		return false;

	val = I915_READ(PCH_DPLL(pll->id));
	hw_state->dpll = val;
	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
	hw_state->fp1 = I915_READ(PCH_FP1(pll->id));

	return val & DPLL_VCO_ENABLE;
}

static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0);
	I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1);
}

static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}

static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	/* Make sure no transcoder is still depending on us. */
	for_each_intel_crtc(dev, crtc) {
		if (intel_crtc_to_shared_dpll(crtc) == pll)
			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
	}

	I915_WRITE(PCH_DPLL(pll->id), 0);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}

static char *ibx_pch_dpll_names[] = {
	"PCH DPLL A",
	"PCH DPLL B",
};

static void ibx_pch_dpll_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	dev_priv->num_shared_dpll = 2;

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		dev_priv->shared_dplls[i].id = i;
		dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
		dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
		dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
		dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
		dev_priv->shared_dplls[i].get_hw_state =
			ibx_pch_dpll_get_hw_state;
	}
}
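
/*
 * Select the shared DPLL backend for this platform: DDI PLLs on HAS_DDI
 * parts, the PCH DPLL pair on IBX/CPT, and no shared DPLLs otherwise.
 */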
static void intel_shared_dpll_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_DDI(dev))
		intel_ddi_pll_init(dev);
	else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		ibx_pch_dpll_init(dev);
	else
		dev_priv->num_shared_dpll = 0;

	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
}
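
/*
 * Universal primary plane support: disabling the plane turns off the
 * hardware plane (if it is still enabled), updates the frontbuffer
 * tracking and unpins the framebuffer object.
 */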
static int
intel_primary_plane_disable(struct drm_plane *plane)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct intel_crtc *intel_crtc;

	if (!plane->fb)
		return 0;

	BUG_ON(!plane->crtc);

	intel_crtc = to_intel_crtc(plane->crtc);

	/*
	 * Even though we checked plane->fb above, it's still possible that
	 * the primary plane has been implicitly disabled because the crtc
	 * coordinates given weren't visible, or because we detected
	 * that it was 100% covered by a sprite plane.  Or, the CRTC may be
	 * off and we've set a fb, but haven't actually turned on the CRTC yet.
	 * In either case, we need to unpin the FB and let the fb pointer get
	 * updated, but otherwise we don't need to touch the hardware.
	 */
	if (!intel_crtc->primary_enabled)
		goto disable_unpin;

	intel_disable_primary_hw_plane(dev_priv, intel_plane->plane,
				       intel_plane->pipe);
disable_unpin:
	mutex_lock(&dev->struct_mutex);
	i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
			  INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
	intel_unpin_fb_obj(intel_fb_obj(plane->fb));
	mutex_unlock(&dev->struct_mutex);
	plane->fb = NULL;

	return 0;
}
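
/*
 * Primary plane update, handled in three cases below: if the CRTC is off
 * the new fb is only pinned and tracked; if clipping leaves the plane
 * invisible the fb is pinned and the hardware plane disabled; otherwise
 * the pipe is pointed at the new fb and the plane re-enabled if needed.
 */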
static int
intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
			     struct drm_framebuffer *fb, int crtc_x, int crtc_y,
			     unsigned int crtc_w, unsigned int crtc_h,
			     uint32_t src_x, uint32_t src_y,
			     uint32_t src_w, uint32_t src_h)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
	struct drm_rect dest = {
		/* integer pixels */
		.x1 = crtc_x,
		.y1 = crtc_y,
		.x2 = crtc_x + crtc_w,
		.y2 = crtc_y + crtc_h,
	};
	struct drm_rect src = {
		/* 16.16 fixed point */
		.x1 = src_x,
		.y1 = src_y,
		.x2 = src_x + src_w,
		.y2 = src_y + src_h,
	};
	const struct drm_rect clip = {
		/* integer pixels */
		.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
		.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
	};
	bool visible;
	int ret;

	ret = drm_plane_helper_check_update(plane, crtc, fb,
					    &src, &dest, &clip,
					    DRM_PLANE_HELPER_NO_SCALING,
					    DRM_PLANE_HELPER_NO_SCALING,
					    false, true, &visible);
	if (ret)
		return ret;

	/*
	 * If the CRTC isn't enabled, we're just pinning the framebuffer,
	 * updating the fb pointer, and returning without touching the
	 * hardware.  This allows us to later do a drmModeSetCrtc with fb=-1 to
	 * turn on the display with all planes setup as desired.
	 */
	if (!crtc->enabled) {
		mutex_lock(&dev->struct_mutex);

		/*
		 * If we already called setplane while the crtc was disabled,
		 * we may have an fb pinned; unpin it.
		 */
		if (plane->fb)
			intel_unpin_fb_obj(old_obj);

		i915_gem_track_fb(old_obj, obj,
				  INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));

		/* Pin and return without programming hardware */
		ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
		mutex_unlock(&dev->struct_mutex);

		return ret;
	}

	/*
	 * If clipping results in a non-visible primary plane, we'll disable
	 * the primary plane.  Note that this is a bit different than what
	 * happens if userspace explicitly disables the plane by passing fb=0
	 * because plane->fb still gets set and pinned.
	 */
	if (!visible) {
		mutex_lock(&dev->struct_mutex);

		/*
		 * Try to pin the new fb first so that we can bail out if we
		 * fail.
		 */
		if (plane->fb != fb) {
			ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
			if (ret) {
				mutex_unlock(&dev->struct_mutex);
				return ret;
			}
		}

		i915_gem_track_fb(old_obj, obj,
				  INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));

		if (intel_crtc->primary_enabled)
			intel_disable_primary_hw_plane(dev_priv,
						       intel_plane->plane,
						       intel_plane->pipe);

		if (plane->fb != fb)
			if (plane->fb)
				intel_unpin_fb_obj(old_obj);

		mutex_unlock(&dev->struct_mutex);

		return 0;
	}

	ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb);
	if (ret)
		return ret;

	if (!intel_crtc->primary_enabled)
		intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
					      intel_crtc->pipe);

	return 0;
}

/* Common destruction function for both primary and cursor planes */
static void intel_plane_destroy(struct drm_plane *plane)
{
	struct intel_plane *intel_plane = to_intel_plane(plane);
	drm_plane_cleanup(plane);
	kfree(intel_plane);
}

static const struct drm_plane_funcs intel_primary_plane_funcs = {
	.update_plane = intel_primary_plane_setplane,
	.disable_plane = intel_primary_plane_disable,
	.destroy = intel_plane_destroy,
};

static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
						    int pipe)
{
	struct intel_plane *primary;
	const uint32_t *intel_primary_formats;
	int num_formats;

	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
	if (primary == NULL)
		return NULL;

	primary->can_scale = false;
	primary->max_downscale = 1;
	primary->pipe = pipe;
	primary->plane = pipe;
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
		primary->plane = !pipe;

	if (INTEL_INFO(dev)->gen <= 3) {
		intel_primary_formats = intel_primary_formats_gen2;
		num_formats = ARRAY_SIZE(intel_primary_formats_gen2);
	} else {
		intel_primary_formats = intel_primary_formats_gen4;
		num_formats = ARRAY_SIZE(intel_primary_formats_gen4);
	}

	drm_universal_plane_init(dev, &primary->base, 0,
				 &intel_primary_plane_funcs,
				 intel_primary_formats, num_formats,
				 DRM_PLANE_TYPE_PRIMARY);
	return &primary->base;
}

static int
intel_cursor_plane_disable(struct drm_plane *plane)
{
	if (!plane->fb)
		return 0;

	BUG_ON(!plane->crtc);

	return intel_crtc_cursor_set_obj(plane->crtc, NULL, 0, 0);
}
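
/*
 * Cursor plane update: position changes are applied directly, while a new
 * framebuffer goes through intel_crtc_cursor_set_obj() to swap the cursor
 * buffer object.
 */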
static int
intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb, int crtc_x, int crtc_y,
			  unsigned int crtc_w, unsigned int crtc_h,
			  uint32_t src_x, uint32_t src_y,
			  uint32_t src_w, uint32_t src_h)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct drm_rect dest = {
		/* integer pixels */
		.x1 = crtc_x,
		.y1 = crtc_y,
		.x2 = crtc_x + crtc_w,
		.y2 = crtc_y + crtc_h,
	};
	struct drm_rect src = {
		/* 16.16 fixed point */
		.x1 = src_x,
		.y1 = src_y,
		.x2 = src_x + src_w,
		.y2 = src_y + src_h,
	};
	const struct drm_rect clip = {
		/* integer pixels */
		.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
		.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
	};
	bool visible;
	int ret;

	ret = drm_plane_helper_check_update(plane, crtc, fb,
					    &src, &dest, &clip,
					    DRM_PLANE_HELPER_NO_SCALING,
					    DRM_PLANE_HELPER_NO_SCALING,
					    true, true, &visible);
	if (ret)
		return ret;

	crtc->cursor_x = crtc_x;
	crtc->cursor_y = crtc_y;
	if (fb != crtc->cursor->fb) {
		return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
	} else {
		intel_crtc_update_cursor(crtc, visible);
		return 0;
	}
}

static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_cursor_plane_update,
	.disable_plane = intel_cursor_plane_disable,
	.destroy = intel_plane_destroy,
};

static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
						   int pipe)
{
	struct intel_plane *cursor;

	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
	if (cursor == NULL)
		return NULL;

	cursor->can_scale = false;
	cursor->max_downscale = 1;
	cursor->pipe = pipe;
	cursor->plane = pipe;

	drm_universal_plane_init(dev, &cursor->base, 0,
				 &intel_cursor_plane_funcs,
				 intel_cursor_formats,
				 ARRAY_SIZE(intel_cursor_formats),
				 DRM_PLANE_TYPE_CURSOR);
	return &cursor->base;
}
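
/*
 * Create one CRTC together with its primary and cursor planes, set up the
 * default gamma LUT and record the pipe/plane to crtc mappings.
 */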
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	struct drm_plane *primary = NULL;
	struct drm_plane *cursor = NULL;
	int i, ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	primary = intel_primary_plane_create(dev, pipe);
	if (!primary)
		goto fail;

	cursor = intel_cursor_plane_create(dev, pipe);
	if (!cursor)
		goto fail;

	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
					cursor, &intel_crtc_funcs);
	if (ret)
		goto fail;

	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
	for (i = 0; i < 256; i++) {
		intel_crtc->lut_r[i] = i;
		intel_crtc->lut_g[i] = i;
		intel_crtc->lut_b[i] = i;
	}

	/*
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
	 * is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	intel_crtc->cursor_base = ~0;
	intel_crtc->cursor_cntl = ~0;

	init_waitqueue_head(&intel_crtc->vbl_wait);

	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
	return;

fail:
	if (primary)
		drm_plane_cleanup(primary);
	if (cursor)
		drm_plane_cleanup(cursor);
	kfree(intel_crtc);
}

enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
{
	struct drm_encoder *encoder = connector->base.encoder;
	struct drm_device *dev = connector->base.dev;

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	if (!encoder)
		return INVALID_PIPE;

	return to_intel_crtc(encoder->crtc)->pipe;
}

int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
	struct drm_crtc *drmmode_crtc;
	struct intel_crtc *crtc;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);

	if (!drmmode_crtc) {
		DRM_ERROR("no such CRTC id\n");
		return -ENOENT;
	}

	crtc = to_intel_crtc(drmmode_crtc);
	pipe_from_crtc_id->pipe = crtc->pipe;

	return 0;
}
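
/*
 * Build the bitmask of encoders that can be cloned with the given encoder;
 * used by intel_setup_outputs() below to fill in possible_clones.
 */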
static int intel_encoder_clones(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_encoder *source_encoder;
	int index_mask = 0;
	int entry = 0;

	list_for_each_entry(source_encoder,
			    &dev->mode_config.encoder_list, base.head) {
		if (encoders_cloneable(encoder, source_encoder))
			index_mask |= (1 << entry);

		entry++;
	}

	return index_mask;
}
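
/* eDP on port A is only possible on mobile parts with DP_A strapped
 * present and, on Ironlake, not fused off. */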
static bool has_edp_a(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_MOBILE(dev))
		return false;

	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
		return false;

	if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}

const char *intel_output_name(int output)
{
	static const char *names[] = {
		[INTEL_OUTPUT_UNUSED] = "Unused",
		[INTEL_OUTPUT_ANALOG] = "Analog",
		[INTEL_OUTPUT_DVO] = "DVO",
		[INTEL_OUTPUT_SDVO] = "SDVO",
		[INTEL_OUTPUT_LVDS] = "LVDS",
		[INTEL_OUTPUT_TVOUT] = "TV",
		[INTEL_OUTPUT_HDMI] = "HDMI",
		[INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort",
		[INTEL_OUTPUT_EDP] = "eDP",
		[INTEL_OUTPUT_DSI] = "DSI",
		[INTEL_OUTPUT_UNKNOWN] = "Unknown",
	};

	if (output < 0 || output >= ARRAY_SIZE(names) || !names[output])
		return "Invalid";

	return names[output];
}

static bool intel_crt_present(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_ULT(dev))
		return false;

	if (IS_CHERRYVIEW(dev))
		return false;

	if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
		return false;

	return true;
}
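
/*
 * Probe the display outputs for this platform: DDI ports via strap
 * registers on HAS_DDI parts, PCH ports on PCH-split parts, the VLV/CHV
 * display block ports, legacy SDVO/HDMI/DP otherwise, and DVO on gen2.
 * Afterwards possible_crtcs and possible_clones are filled in for each
 * registered encoder.
 */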
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_lvds_init(dev);

	if (intel_crt_present(dev))
		intel_crt_init(dev);

	if (HAS_DDI(dev)) {
		int found;

		/* Haswell uses DDI functions to detect digital outputs */
		found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
		/* DDI A only supports eDP */
		if (found)
			intel_ddi_init(dev, PORT_A);

		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev, PORT_D);
	} else if (HAS_PCH_SPLIT(dev)) {
		int found;
		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB, true);
			if (!found)
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev)) {
		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
					PORT_B);
			if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
		}

		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
					PORT_C);
			if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
		}

		if (IS_CHERRYVIEW(dev)) {
			if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED) {
				intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID,
						PORT_D);
				if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED)
					intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D);
			}
		}

		intel_dsi_init(dev);
	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, GEN3_SDVOB, true);
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
			}

			if (!found && SUPPORTS_INTEGRATED_DP(dev))
				intel_dp_init(dev, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, GEN3_SDVOC, false);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
			}
			if (SUPPORTS_INTEGRATED_DP(dev))
				intel_dp_init(dev, DP_C, PORT_C);
		}

		if (SUPPORTS_INTEGRATED_DP(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev, DP_D, PORT_D);
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);

	intel_edp_psr_init(dev);

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev);

	drm_helper_move_panel_connectors_to_head(dev);
}

static const struct drm_framebuffer_funcs intel_fb_funcs = {
//	.destroy = intel_user_framebuffer_destroy,
//	.create_handle = intel_user_framebuffer_create_handle,
};
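
/*
 * Validate a framebuffer before wrapping it: tiling mode, pitch alignment
 * and per-generation pitch limits, pixel format support and overall size
 * checks, then register it with the DRM core.
 */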
static int intel_framebuffer_init(struct drm_device *dev,
			   struct intel_framebuffer *intel_fb,
			   struct drm_mode_fb_cmd2 *mode_cmd,
			   struct drm_i915_gem_object *obj)
{
	int aligned_height;
	int pitch_limit;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (obj->tiling_mode == I915_TILING_Y) {
		DRM_DEBUG("hardware does not support tiling Y\n");
		return -EINVAL;
	}

	if (mode_cmd->pitches[0] & 63) {
		DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
			  mode_cmd->pitches[0]);
		return -EINVAL;
	}

	if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) {
		pitch_limit = 32*1024;
	} else if (INTEL_INFO(dev)->gen >= 4) {
		if (obj->tiling_mode)
			pitch_limit = 16*1024;
		else
			pitch_limit = 32*1024;
	} else if (INTEL_INFO(dev)->gen >= 3) {
		if (obj->tiling_mode)
			pitch_limit = 8*1024;
		else
			pitch_limit = 16*1024;
	} else
		/* XXX DSPC is limited to 4k tiled */
		pitch_limit = 8*1024;

	if (mode_cmd->pitches[0] > pitch_limit) {
		DRM_DEBUG("%s pitch (%d) must be less than %d\n",
11865
			  obj->tiling_mode ? "tiled" : "linear",
11888
			  obj->tiling_mode ? "tiled" : "linear",
11866
			  mode_cmd->pitches[0], pitch_limit);
11889
			  mode_cmd->pitches[0], pitch_limit);
11867
		return -EINVAL;
11890
		return -EINVAL;
11868
	}
11891
	}
11869
 
11892
 
11870
	if (obj->tiling_mode != I915_TILING_NONE &&
11893
	if (obj->tiling_mode != I915_TILING_NONE &&
11871
	    mode_cmd->pitches[0] != obj->stride) {
11894
	    mode_cmd->pitches[0] != obj->stride) {
11872
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
11895
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
11873
			  mode_cmd->pitches[0], obj->stride);
11896
			  mode_cmd->pitches[0], obj->stride);
11874
			return -EINVAL;
11897
			return -EINVAL;
11875
	}
11898
	}
11876
 
11899
 
11877
	/* Reject formats not supported by any plane early. */
11900
	/* Reject formats not supported by any plane early. */
11878
	switch (mode_cmd->pixel_format) {
11901
	switch (mode_cmd->pixel_format) {
11879
	case DRM_FORMAT_C8:
11902
	case DRM_FORMAT_C8:
11880
	case DRM_FORMAT_RGB565:
11903
	case DRM_FORMAT_RGB565:
11881
	case DRM_FORMAT_XRGB8888:
11904
	case DRM_FORMAT_XRGB8888:
11882
	case DRM_FORMAT_ARGB8888:
11905
	case DRM_FORMAT_ARGB8888:
11883
		break;
11906
		break;
11884
	case DRM_FORMAT_XRGB1555:
11907
	case DRM_FORMAT_XRGB1555:
11885
	case DRM_FORMAT_ARGB1555:
11908
	case DRM_FORMAT_ARGB1555:
11886
		if (INTEL_INFO(dev)->gen > 3) {
11909
		if (INTEL_INFO(dev)->gen > 3) {
11887
			DRM_DEBUG("unsupported pixel format: %s\n",
11910
			DRM_DEBUG("unsupported pixel format: %s\n",
11888
				  drm_get_format_name(mode_cmd->pixel_format));
11911
				  drm_get_format_name(mode_cmd->pixel_format));
11889
			return -EINVAL;
11912
			return -EINVAL;
11890
		}
11913
		}
11891
		break;
11914
		break;
11892
	case DRM_FORMAT_XBGR8888:
11915
	case DRM_FORMAT_XBGR8888:
11893
	case DRM_FORMAT_ABGR8888:
11916
	case DRM_FORMAT_ABGR8888:
11894
	case DRM_FORMAT_XRGB2101010:
11917
	case DRM_FORMAT_XRGB2101010:
11895
	case DRM_FORMAT_ARGB2101010:
11918
	case DRM_FORMAT_ARGB2101010:
11896
	case DRM_FORMAT_XBGR2101010:
11919
	case DRM_FORMAT_XBGR2101010:
11897
	case DRM_FORMAT_ABGR2101010:
11920
	case DRM_FORMAT_ABGR2101010:
11898
		if (INTEL_INFO(dev)->gen < 4) {
11921
		if (INTEL_INFO(dev)->gen < 4) {
11899
			DRM_DEBUG("unsupported pixel format: %s\n",
11922
			DRM_DEBUG("unsupported pixel format: %s\n",
11900
				  drm_get_format_name(mode_cmd->pixel_format));
11923
				  drm_get_format_name(mode_cmd->pixel_format));
11901
			return -EINVAL;
11924
			return -EINVAL;
11902
		}
11925
		}
11903
		break;
11926
		break;
11904
	case DRM_FORMAT_YUYV:
11927
	case DRM_FORMAT_YUYV:
11905
	case DRM_FORMAT_UYVY:
11928
	case DRM_FORMAT_UYVY:
11906
	case DRM_FORMAT_YVYU:
11929
	case DRM_FORMAT_YVYU:
11907
	case DRM_FORMAT_VYUY:
11930
	case DRM_FORMAT_VYUY:
11908
		if (INTEL_INFO(dev)->gen < 5) {
11931
		if (INTEL_INFO(dev)->gen < 5) {
11909
			DRM_DEBUG("unsupported pixel format: %s\n",
11932
			DRM_DEBUG("unsupported pixel format: %s\n",
11910
				  drm_get_format_name(mode_cmd->pixel_format));
11933
				  drm_get_format_name(mode_cmd->pixel_format));
11911
			return -EINVAL;
11934
			return -EINVAL;
11912
		}
11935
		}
11913
		break;
11936
		break;
11914
	default:
11937
	default:
11915
		DRM_DEBUG("unsupported pixel format: %s\n",
11938
		DRM_DEBUG("unsupported pixel format: %s\n",
11916
			  drm_get_format_name(mode_cmd->pixel_format));
11939
			  drm_get_format_name(mode_cmd->pixel_format));
11917
		return -EINVAL;
11940
		return -EINVAL;
11918
	}
11941
	}
11919
 
11942
 
11920
	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
11943
	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
11921
	if (mode_cmd->offsets[0] != 0)
11944
	if (mode_cmd->offsets[0] != 0)
11922
		return -EINVAL;
11945
		return -EINVAL;
11923
 
11946
 
11924
	aligned_height = intel_align_height(dev, mode_cmd->height,
11947
	aligned_height = intel_align_height(dev, mode_cmd->height,
11925
					    obj->tiling_mode);
11948
					    obj->tiling_mode);
11926
	/* FIXME drm helper for size checks (especially planar formats)? */
11949
	/* FIXME drm helper for size checks (especially planar formats)? */
11927
	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
11950
	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
11928
		return -EINVAL;
11951
		return -EINVAL;
11929
 
11952
 
11930
	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
11953
	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
11931
	intel_fb->obj = obj;
11954
	intel_fb->obj = obj;
11932
	intel_fb->obj->framebuffer_references++;
11955
	intel_fb->obj->framebuffer_references++;
11933
 
11956
 
11934
	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
11957
	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
11935
	if (ret) {
11958
	if (ret) {
11936
		DRM_ERROR("framebuffer init failed %d\n", ret);
11959
		DRM_ERROR("framebuffer init failed %d\n", ret);
11937
		return ret;
11960
		return ret;
11938
	}
11961
	}
11939
 
11962
 
11940
	return 0;
11963
	return 0;
11941
}
11964
}
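/*
 * Summary of the validation above (a sketch of the flow, not new behaviour):
 * the framebuffer is rejected if its pitch exceeds the per-generation limit,
 * if a tiled object's pitch differs from its fence stride, if the pixel
 * format is not supported by any plane on this generation, if the surface
 * offset is non-zero, or if the backing object is smaller than
 * aligned_height * pitch.
 */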
11942
 
11965
 
11943
#ifndef CONFIG_DRM_I915_FBDEV
11966
#ifndef CONFIG_DRM_I915_FBDEV
11944
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
11967
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
11945
{
11968
{
11946
}
11969
}
11947
#endif
11970
#endif
11948
 
11971
 
11949
static const struct drm_mode_config_funcs intel_mode_funcs = {
11972
static const struct drm_mode_config_funcs intel_mode_funcs = {
11950
	.fb_create = NULL,
11973
	.fb_create = NULL,
11951
	.output_poll_changed = intel_fbdev_output_poll_changed,
11974
	.output_poll_changed = intel_fbdev_output_poll_changed,
11952
};
11975
};
11953
 
11976
 
11954
/* Set up chip specific display functions */
11977
/* Set up chip specific display functions */
11955
static void intel_init_display(struct drm_device *dev)
11978
static void intel_init_display(struct drm_device *dev)
11956
{
11979
{
11957
	struct drm_i915_private *dev_priv = dev->dev_private;
11980
	struct drm_i915_private *dev_priv = dev->dev_private;
11958
 
11981
 
11959
	if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
11982
	if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
11960
		dev_priv->display.find_dpll = g4x_find_best_dpll;
11983
		dev_priv->display.find_dpll = g4x_find_best_dpll;
11961
	else if (IS_CHERRYVIEW(dev))
11984
	else if (IS_CHERRYVIEW(dev))
11962
		dev_priv->display.find_dpll = chv_find_best_dpll;
11985
		dev_priv->display.find_dpll = chv_find_best_dpll;
11963
	else if (IS_VALLEYVIEW(dev))
11986
	else if (IS_VALLEYVIEW(dev))
11964
		dev_priv->display.find_dpll = vlv_find_best_dpll;
11987
		dev_priv->display.find_dpll = vlv_find_best_dpll;
11965
	else if (IS_PINEVIEW(dev))
11988
	else if (IS_PINEVIEW(dev))
11966
		dev_priv->display.find_dpll = pnv_find_best_dpll;
11989
		dev_priv->display.find_dpll = pnv_find_best_dpll;
11967
	else
11990
	else
11968
		dev_priv->display.find_dpll = i9xx_find_best_dpll;
11991
		dev_priv->display.find_dpll = i9xx_find_best_dpll;
11969
 
11992
 
11970
	if (HAS_DDI(dev)) {
11993
	if (HAS_DDI(dev)) {
11971
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
11994
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
11972
		dev_priv->display.get_plane_config = ironlake_get_plane_config;
11995
		dev_priv->display.get_plane_config = ironlake_get_plane_config;
11973
		dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
11996
		dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
11974
		dev_priv->display.crtc_enable = haswell_crtc_enable;
11997
		dev_priv->display.crtc_enable = haswell_crtc_enable;
11975
		dev_priv->display.crtc_disable = haswell_crtc_disable;
11998
		dev_priv->display.crtc_disable = haswell_crtc_disable;
11976
		dev_priv->display.off = ironlake_crtc_off;
11999
		dev_priv->display.off = ironlake_crtc_off;
11977
		dev_priv->display.update_primary_plane =
12000
		dev_priv->display.update_primary_plane =
11978
			ironlake_update_primary_plane;
12001
			ironlake_update_primary_plane;
11979
	} else if (HAS_PCH_SPLIT(dev)) {
12002
	} else if (HAS_PCH_SPLIT(dev)) {
11980
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
12003
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
11981
		dev_priv->display.get_plane_config = ironlake_get_plane_config;
12004
		dev_priv->display.get_plane_config = ironlake_get_plane_config;
11982
		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
12005
		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
11983
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
12006
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
11984
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
12007
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
11985
		dev_priv->display.off = ironlake_crtc_off;
12008
		dev_priv->display.off = ironlake_crtc_off;
11986
		dev_priv->display.update_primary_plane =
12009
		dev_priv->display.update_primary_plane =
11987
			ironlake_update_primary_plane;
12010
			ironlake_update_primary_plane;
11988
	} else if (IS_VALLEYVIEW(dev)) {
12011
	} else if (IS_VALLEYVIEW(dev)) {
11989
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
12012
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
11990
		dev_priv->display.get_plane_config = i9xx_get_plane_config;
12013
		dev_priv->display.get_plane_config = i9xx_get_plane_config;
11991
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
12014
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
11992
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
12015
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
11993
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
12016
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
11994
		dev_priv->display.off = i9xx_crtc_off;
12017
		dev_priv->display.off = i9xx_crtc_off;
11995
		dev_priv->display.update_primary_plane =
12018
		dev_priv->display.update_primary_plane =
11996
			i9xx_update_primary_plane;
12019
			i9xx_update_primary_plane;
11997
	} else {
12020
	} else {
11998
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
12021
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
11999
		dev_priv->display.get_plane_config = i9xx_get_plane_config;
12022
		dev_priv->display.get_plane_config = i9xx_get_plane_config;
12000
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
12023
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
12001
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
12024
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
12002
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
12025
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
12003
		dev_priv->display.off = i9xx_crtc_off;
12026
		dev_priv->display.off = i9xx_crtc_off;
12004
		dev_priv->display.update_primary_plane =
12027
		dev_priv->display.update_primary_plane =
12005
			i9xx_update_primary_plane;
12028
			i9xx_update_primary_plane;
12006
	}
12029
	}
12007
 
12030
 
12008
	/* Returns the core display clock speed */
12031
	/* Returns the core display clock speed */
12009
	if (IS_VALLEYVIEW(dev))
12032
	if (IS_VALLEYVIEW(dev))
12010
		dev_priv->display.get_display_clock_speed =
12033
		dev_priv->display.get_display_clock_speed =
12011
			valleyview_get_display_clock_speed;
12034
			valleyview_get_display_clock_speed;
12012
	else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
12035
	else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
12013
		dev_priv->display.get_display_clock_speed =
12036
		dev_priv->display.get_display_clock_speed =
12014
			i945_get_display_clock_speed;
12037
			i945_get_display_clock_speed;
12015
	else if (IS_I915G(dev))
12038
	else if (IS_I915G(dev))
12016
		dev_priv->display.get_display_clock_speed =
12039
		dev_priv->display.get_display_clock_speed =
12017
			i915_get_display_clock_speed;
12040
			i915_get_display_clock_speed;
12018
	else if (IS_I945GM(dev) || IS_845G(dev))
12041
	else if (IS_I945GM(dev) || IS_845G(dev))
12019
		dev_priv->display.get_display_clock_speed =
12042
		dev_priv->display.get_display_clock_speed =
12020
			i9xx_misc_get_display_clock_speed;
12043
			i9xx_misc_get_display_clock_speed;
12021
	else if (IS_PINEVIEW(dev))
12044
	else if (IS_PINEVIEW(dev))
12022
		dev_priv->display.get_display_clock_speed =
12045
		dev_priv->display.get_display_clock_speed =
12023
			pnv_get_display_clock_speed;
12046
			pnv_get_display_clock_speed;
12024
	else if (IS_I915GM(dev))
12047
	else if (IS_I915GM(dev))
12025
		dev_priv->display.get_display_clock_speed =
12048
		dev_priv->display.get_display_clock_speed =
12026
			i915gm_get_display_clock_speed;
12049
			i915gm_get_display_clock_speed;
12027
	else if (IS_I865G(dev))
12050
	else if (IS_I865G(dev))
12028
		dev_priv->display.get_display_clock_speed =
12051
		dev_priv->display.get_display_clock_speed =
12029
			i865_get_display_clock_speed;
12052
			i865_get_display_clock_speed;
12030
	else if (IS_I85X(dev))
12053
	else if (IS_I85X(dev))
12031
		dev_priv->display.get_display_clock_speed =
12054
		dev_priv->display.get_display_clock_speed =
12032
			i855_get_display_clock_speed;
12055
			i855_get_display_clock_speed;
12033
	else /* 852, 830 */
12056
	else /* 852, 830 */
12034
		dev_priv->display.get_display_clock_speed =
12057
		dev_priv->display.get_display_clock_speed =
12035
			i830_get_display_clock_speed;
12058
			i830_get_display_clock_speed;
12036
 
12059
 
12037
	if (HAS_PCH_SPLIT(dev)) {
12060
	if (HAS_PCH_SPLIT(dev)) {
12038
		if (IS_GEN5(dev)) {
12061
		if (IS_GEN5(dev)) {
12039
			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
12062
			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
12040
			dev_priv->display.write_eld = ironlake_write_eld;
12063
			dev_priv->display.write_eld = ironlake_write_eld;
12041
		} else if (IS_GEN6(dev)) {
12064
		} else if (IS_GEN6(dev)) {
12042
			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
12065
			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
12043
			dev_priv->display.write_eld = ironlake_write_eld;
12066
			dev_priv->display.write_eld = ironlake_write_eld;
12044
			dev_priv->display.modeset_global_resources =
12067
			dev_priv->display.modeset_global_resources =
12045
				snb_modeset_global_resources;
12068
				snb_modeset_global_resources;
12046
		} else if (IS_IVYBRIDGE(dev)) {
12069
		} else if (IS_IVYBRIDGE(dev)) {
12047
			/* FIXME: detect B0+ stepping and use auto training */
12070
			/* FIXME: detect B0+ stepping and use auto training */
12048
			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
12071
			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
12049
			dev_priv->display.write_eld = ironlake_write_eld;
12072
			dev_priv->display.write_eld = ironlake_write_eld;
12050
			dev_priv->display.modeset_global_resources =
12073
			dev_priv->display.modeset_global_resources =
12051
				ivb_modeset_global_resources;
12074
				ivb_modeset_global_resources;
12052
		} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
12075
		} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
12053
			dev_priv->display.fdi_link_train = hsw_fdi_link_train;
12076
			dev_priv->display.fdi_link_train = hsw_fdi_link_train;
12054
			dev_priv->display.write_eld = haswell_write_eld;
12077
			dev_priv->display.write_eld = haswell_write_eld;
12055
			dev_priv->display.modeset_global_resources =
12078
			dev_priv->display.modeset_global_resources =
12056
				haswell_modeset_global_resources;
12079
				haswell_modeset_global_resources;
12057
		}
12080
		}
12058
	} else if (IS_G4X(dev)) {
12081
	} else if (IS_G4X(dev)) {
12059
		dev_priv->display.write_eld = g4x_write_eld;
12082
		dev_priv->display.write_eld = g4x_write_eld;
12060
	} else if (IS_VALLEYVIEW(dev)) {
12083
	} else if (IS_VALLEYVIEW(dev)) {
12061
		dev_priv->display.modeset_global_resources =
12084
		dev_priv->display.modeset_global_resources =
12062
			valleyview_modeset_global_resources;
12085
			valleyview_modeset_global_resources;
12063
		dev_priv->display.write_eld = ironlake_write_eld;
12086
		dev_priv->display.write_eld = ironlake_write_eld;
12064
	}
12087
	}
12065
 
12088
 
12066
	/* Default just returns -ENODEV to indicate unsupported */
12089
	/* Default just returns -ENODEV to indicate unsupported */
12067
//	dev_priv->display.queue_flip = intel_default_queue_flip;
12090
//	dev_priv->display.queue_flip = intel_default_queue_flip;
12068
 
12091
 
12069
 
12092
 
12070
 
12093
 
12071
 
12094
 
12072
	intel_panel_init_backlight_funcs(dev);
12095
	intel_panel_init_backlight_funcs(dev);
12073
}
12096
}
12074
 
12097
 
12075
/*
12098
/*
12076
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
12099
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
12077
 * resume, or other times.  This quirk makes sure that's the case for
12100
 * resume, or other times.  This quirk makes sure that's the case for
12078
 * affected systems.
12101
 * affected systems.
12079
 */
12102
 */
12080
static void quirk_pipea_force(struct drm_device *dev)
12103
static void quirk_pipea_force(struct drm_device *dev)
12081
{
12104
{
12082
	struct drm_i915_private *dev_priv = dev->dev_private;
12105
	struct drm_i915_private *dev_priv = dev->dev_private;
12083
 
12106
 
12084
	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
12107
	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
12085
	DRM_INFO("applying pipe a force quirk\n");
12108
	DRM_INFO("applying pipe a force quirk\n");
12086
}
12109
}
12087
 
12110
 
12088
/*
12111
/*
12089
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
12112
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
12090
 */
12113
 */
12091
static void quirk_ssc_force_disable(struct drm_device *dev)
12114
static void quirk_ssc_force_disable(struct drm_device *dev)
12092
{
12115
{
12093
	struct drm_i915_private *dev_priv = dev->dev_private;
12116
	struct drm_i915_private *dev_priv = dev->dev_private;
12094
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
12117
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
12095
	DRM_INFO("applying lvds SSC disable quirk\n");
12118
	DRM_INFO("applying lvds SSC disable quirk\n");
12096
}
12119
}
12097
 
12120
 
12098
/*
12121
/*
12099
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
12122
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
12100
 * brightness value
12123
 * brightness value
12101
 */
12124
 */
12102
static void quirk_invert_brightness(struct drm_device *dev)
12125
static void quirk_invert_brightness(struct drm_device *dev)
12103
{
12126
{
12104
	struct drm_i915_private *dev_priv = dev->dev_private;
12127
	struct drm_i915_private *dev_priv = dev->dev_private;
12105
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
12128
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
12106
	DRM_INFO("applying inverted panel brightness quirk\n");
12129
	DRM_INFO("applying inverted panel brightness quirk\n");
12107
}
12130
}
12108
 
12131
 
12109
/* Some VBTs incorrectly indicate no backlight is present */
12132
/* Some VBTs incorrectly indicate no backlight is present */
12110
static void quirk_backlight_present(struct drm_device *dev)
12133
static void quirk_backlight_present(struct drm_device *dev)
12111
{
12134
{
12112
	struct drm_i915_private *dev_priv = dev->dev_private;
12135
	struct drm_i915_private *dev_priv = dev->dev_private;
12113
	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
12136
	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
12114
	DRM_INFO("applying backlight present quirk\n");
12137
	DRM_INFO("applying backlight present quirk\n");
12115
}
12138
}
12116
 
12139
 
12117
struct intel_quirk {
12140
struct intel_quirk {
12118
	int device;
12141
	int device;
12119
	int subsystem_vendor;
12142
	int subsystem_vendor;
12120
	int subsystem_device;
12143
	int subsystem_device;
12121
	void (*hook)(struct drm_device *dev);
12144
	void (*hook)(struct drm_device *dev);
12122
};
12145
};
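/*
 * A quirk entry matches when the PCI device ID is equal and each subsystem
 * field either matches or is PCI_ANY_ID (see intel_init_quirks() below).
 * A hypothetical wildcard entry, shown only as an illustration, would look
 * like:
 *
 *	{ 0x2a42, PCI_ANY_ID, PCI_ANY_ID, quirk_invert_brightness },
 */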
12123
 
12146
 
12124
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
12147
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
12125
struct intel_dmi_quirk {
12148
struct intel_dmi_quirk {
12126
	void (*hook)(struct drm_device *dev);
12149
	void (*hook)(struct drm_device *dev);
12127
	const struct dmi_system_id (*dmi_id_list)[];
12150
	const struct dmi_system_id (*dmi_id_list)[];
12128
};
12151
};
12129
 
12152
 
12130
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
12153
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
12131
{
12154
{
12132
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
12155
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
12133
	return 1;
12156
	return 1;
12134
}
12157
}
12135
 
12158
 
12136
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
12159
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
12137
	{
12160
	{
12138
		.dmi_id_list = &(const struct dmi_system_id[]) {
12161
		.dmi_id_list = &(const struct dmi_system_id[]) {
12139
			{
12162
			{
12140
				.callback = intel_dmi_reverse_brightness,
12163
				.callback = intel_dmi_reverse_brightness,
12141
				.ident = "NCR Corporation",
12164
				.ident = "NCR Corporation",
12142
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
12165
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
12143
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
12166
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
12144
				},
12167
				},
12145
			},
12168
			},
12146
			{ }  /* terminating entry */
12169
			{ }  /* terminating entry */
12147
		},
12170
		},
12148
		.hook = quirk_invert_brightness,
12171
		.hook = quirk_invert_brightness,
12149
	},
12172
	},
12150
};
12173
};
12151
 
12174
 
12152
static struct intel_quirk intel_quirks[] = {
12175
static struct intel_quirk intel_quirks[] = {
12153
	/* HP Mini needs pipe A force quirk (LP: #322104) */
12176
	/* HP Mini needs pipe A force quirk (LP: #322104) */
12154
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
12177
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
12155
 
12178
 
12156
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
12179
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
12157
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
12180
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
12158
 
12181
 
12159
	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
12182
	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
12160
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
12183
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
12161
 
12184
 
12162
	/* Lenovo U160 cannot use SSC on LVDS */
12185
	/* Lenovo U160 cannot use SSC on LVDS */
12163
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
12186
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
12164
 
12187
 
12165
	/* Sony Vaio Y cannot use SSC on LVDS */
12188
	/* Sony Vaio Y cannot use SSC on LVDS */
12166
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
12189
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
12167
 
12190
 
12168
	/* Acer Aspire 5734Z must invert backlight brightness */
12191
	/* Acer Aspire 5734Z must invert backlight brightness */
12169
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
12192
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
12170
 
12193
 
12171
	/* Acer/eMachines G725 */
12194
	/* Acer/eMachines G725 */
12172
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
12195
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
12173
 
12196
 
12174
	/* Acer/eMachines e725 */
12197
	/* Acer/eMachines e725 */
12175
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
12198
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
12176
 
12199
 
12177
	/* Acer/Packard Bell NCL20 */
12200
	/* Acer/Packard Bell NCL20 */
12178
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
12201
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
12179
 
12202
 
12180
	/* Acer Aspire 4736Z */
12203
	/* Acer Aspire 4736Z */
12181
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
12204
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
12182
 
12205
 
12183
	/* Acer Aspire 5336 */
12206
	/* Acer Aspire 5336 */
12184
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
12207
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
12185
 
12208
 
12186
	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
12209
	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
12187
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
12210
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
-
 
12211
 
-
 
12212
	/* Acer C720 Chromebook (Core i3 4005U) */
-
 
12213
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
12188
 
12214
 
12189
	/* Toshiba CB35 Chromebook (Celeron 2955U) */
12215
	/* Toshiba CB35 Chromebook (Celeron 2955U) */
12190
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
12216
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
12191
 
12217
 
12192
	/* HP Chromebook 14 (Celeron 2955U) */
12218
	/* HP Chromebook 14 (Celeron 2955U) */
12193
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
12219
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
12194
};
12220
};
12195
 
12221
 
12196
static void intel_init_quirks(struct drm_device *dev)
12222
static void intel_init_quirks(struct drm_device *dev)
12197
{
12223
{
12198
	struct pci_dev *d = dev->pdev;
12224
	struct pci_dev *d = dev->pdev;
12199
	int i;
12225
	int i;
12200
 
12226
 
12201
	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
12227
	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
12202
		struct intel_quirk *q = &intel_quirks[i];
12228
		struct intel_quirk *q = &intel_quirks[i];
12203
 
12229
 
12204
		if (d->device == q->device &&
12230
		if (d->device == q->device &&
12205
		    (d->subsystem_vendor == q->subsystem_vendor ||
12231
		    (d->subsystem_vendor == q->subsystem_vendor ||
12206
		     q->subsystem_vendor == PCI_ANY_ID) &&
12232
		     q->subsystem_vendor == PCI_ANY_ID) &&
12207
		    (d->subsystem_device == q->subsystem_device ||
12233
		    (d->subsystem_device == q->subsystem_device ||
12208
		     q->subsystem_device == PCI_ANY_ID))
12234
		     q->subsystem_device == PCI_ANY_ID))
12209
			q->hook(dev);
12235
			q->hook(dev);
12210
	}
12236
	}
-
 
12237
	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
-
 
12238
		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
-
 
12239
			intel_dmi_quirks[i].hook(dev);
-
 
12240
	}
12211
}
12241
}
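/*
 * Note: quirk application is two-pass. The PCI-ID table is walked first and
 * every matching hook runs; the DMI table then covers systems whose
 * subsystem vendor/device IDs are not meaningful.
 */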
12212
 
12242
 
12213
/* Disable the VGA plane that we never use */
12243
/* Disable the VGA plane that we never use */
12214
static void i915_disable_vga(struct drm_device *dev)
12244
static void i915_disable_vga(struct drm_device *dev)
12215
{
12245
{
12216
	struct drm_i915_private *dev_priv = dev->dev_private;
12246
	struct drm_i915_private *dev_priv = dev->dev_private;
12217
	u8 sr1;
12247
	u8 sr1;
12218
	u32 vga_reg = i915_vgacntrl_reg(dev);
12248
	u32 vga_reg = i915_vgacntrl_reg(dev);
12219
 
12249
 
12220
//	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
12250
//	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
12221
	outb(SR01, VGA_SR_INDEX);
12251
	outb(SR01, VGA_SR_INDEX);
12222
	sr1 = inb(VGA_SR_DATA);
12252
	sr1 = inb(VGA_SR_DATA);
12223
	outb(sr1 | 1<<5, VGA_SR_DATA);
12253
	outb(sr1 | 1<<5, VGA_SR_DATA);
12224
//	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
12254
//	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
12225
	udelay(300);
12255
	udelay(300);
12226
 
12256
 
12227
	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
12257
	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
12228
	POSTING_READ(vga_reg);
12258
	POSTING_READ(vga_reg);
12229
}
12259
}
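/*
 * The sequence above is the standard VGA "screen off" dance: select
 * sequencer register SR01 via VGA_SR_INDEX, set its screen-off bit (1 << 5),
 * give the hardware a moment to settle (udelay(300)), and only then disable
 * the VGA display plane through the VGACNTRL register.
 */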
12230
 
12260
 
12231
void intel_modeset_init_hw(struct drm_device *dev)
12261
void intel_modeset_init_hw(struct drm_device *dev)
12232
{
12262
{
12233
	intel_prepare_ddi(dev);
12263
	intel_prepare_ddi(dev);
12234
 
12264
 
12235
	if (IS_VALLEYVIEW(dev))
12265
	if (IS_VALLEYVIEW(dev))
12236
		vlv_update_cdclk(dev);
12266
		vlv_update_cdclk(dev);
12237
 
12267
 
12238
	intel_init_clock_gating(dev);
12268
	intel_init_clock_gating(dev);
12239
 
12269
 
12240
	intel_reset_dpio(dev);
12270
	intel_reset_dpio(dev);
12241
 
12271
 
12242
	intel_enable_gt_powersave(dev);
12272
	intel_enable_gt_powersave(dev);
12243
}
12273
}
12244
 
12274
 
12245
void intel_modeset_suspend_hw(struct drm_device *dev)
12275
void intel_modeset_suspend_hw(struct drm_device *dev)
12246
{
12276
{
12247
	intel_suspend_hw(dev);
12277
	intel_suspend_hw(dev);
12248
}
12278
}
12249
 
12279
 
12250
void intel_modeset_init(struct drm_device *dev)
12280
void intel_modeset_init(struct drm_device *dev)
12251
{
12281
{
12252
	struct drm_i915_private *dev_priv = dev->dev_private;
12282
	struct drm_i915_private *dev_priv = dev->dev_private;
12253
	int sprite, ret;
12283
	int sprite, ret;
12254
	enum pipe pipe;
12284
	enum pipe pipe;
12255
	struct intel_crtc *crtc;
12285
	struct intel_crtc *crtc;
12256
 
12286
 
12257
	drm_mode_config_init(dev);
12287
	drm_mode_config_init(dev);
12258
 
12288
 
12259
	dev->mode_config.min_width = 0;
12289
	dev->mode_config.min_width = 0;
12260
	dev->mode_config.min_height = 0;
12290
	dev->mode_config.min_height = 0;
12261
 
12291
 
12262
	dev->mode_config.preferred_depth = 24;
12292
	dev->mode_config.preferred_depth = 24;
12263
	dev->mode_config.prefer_shadow = 1;
12293
	dev->mode_config.prefer_shadow = 1;
12264
 
12294
 
12265
	dev->mode_config.funcs = &intel_mode_funcs;
12295
	dev->mode_config.funcs = &intel_mode_funcs;
12266
 
12296
 
12267
	intel_init_quirks(dev);
12297
	intel_init_quirks(dev);
12268
 
12298
 
12269
	intel_init_pm(dev);
12299
	intel_init_pm(dev);
12270
 
12300
 
12271
	if (INTEL_INFO(dev)->num_pipes == 0)
12301
	if (INTEL_INFO(dev)->num_pipes == 0)
12272
		return;
12302
		return;
12273
 
12303
 
12274
	intel_init_display(dev);
12304
	intel_init_display(dev);
12275
 
12305
 
12276
	if (IS_GEN2(dev)) {
12306
	if (IS_GEN2(dev)) {
12277
		dev->mode_config.max_width = 2048;
12307
		dev->mode_config.max_width = 2048;
12278
		dev->mode_config.max_height = 2048;
12308
		dev->mode_config.max_height = 2048;
12279
	} else if (IS_GEN3(dev)) {
12309
	} else if (IS_GEN3(dev)) {
12280
		dev->mode_config.max_width = 4096;
12310
		dev->mode_config.max_width = 4096;
12281
		dev->mode_config.max_height = 4096;
12311
		dev->mode_config.max_height = 4096;
12282
	} else {
12312
	} else {
12283
		dev->mode_config.max_width = 8192;
12313
		dev->mode_config.max_width = 8192;
12284
		dev->mode_config.max_height = 8192;
12314
		dev->mode_config.max_height = 8192;
12285
	}
12315
	}
12286
 
12316
 
12287
	if (IS_GEN2(dev)) {
12317
	if (IS_GEN2(dev)) {
12288
		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
12318
		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
12289
		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
12319
		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
12290
	} else {
12320
	} else {
12291
		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
12321
		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
12292
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
12322
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
12293
	}
12323
	}
12294
 
12324
 
12295
	dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
12325
	dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
12296
 
12326
 
12297
	DRM_DEBUG_KMS("%d display pipe%s available.\n",
12327
	DRM_DEBUG_KMS("%d display pipe%s available.\n",
12298
		      INTEL_INFO(dev)->num_pipes,
12328
		      INTEL_INFO(dev)->num_pipes,
12299
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
12329
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
12300
 
12330
 
12301
	for_each_pipe(pipe) {
12331
	for_each_pipe(pipe) {
12302
		intel_crtc_init(dev, pipe);
12332
		intel_crtc_init(dev, pipe);
12303
		for_each_sprite(pipe, sprite) {
12333
		for_each_sprite(pipe, sprite) {
12304
			ret = intel_plane_init(dev, pipe, sprite);
12334
			ret = intel_plane_init(dev, pipe, sprite);
12305
			if (ret)
12335
			if (ret)
12306
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
12336
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
12307
					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
12337
					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
12308
		}
12338
		}
12309
	}
12339
	}
12310
 
12340
 
12311
	intel_init_dpio(dev);
12341
	intel_init_dpio(dev);
12312
	intel_reset_dpio(dev);
12342
	intel_reset_dpio(dev);
12313
 
12343
 
12314
	intel_shared_dpll_init(dev);
12344
	intel_shared_dpll_init(dev);
12315
 
12345
 
12316
	/* Just disable it once at startup */
12346
	/* Just disable it once at startup */
12317
	i915_disable_vga(dev);
12347
	i915_disable_vga(dev);
12318
	intel_setup_outputs(dev);
12348
	intel_setup_outputs(dev);
12319
 
12349
 
12320
	/* Just in case the BIOS is doing something questionable. */
12350
	/* Just in case the BIOS is doing something questionable. */
12321
	intel_disable_fbc(dev);
12351
	intel_disable_fbc(dev);
12322
 
12352
 
12323
	drm_modeset_lock_all(dev);
12353
	drm_modeset_lock_all(dev);
12324
	intel_modeset_setup_hw_state(dev, false);
12354
	intel_modeset_setup_hw_state(dev, false);
12325
	drm_modeset_unlock_all(dev);
12355
	drm_modeset_unlock_all(dev);
12326
 
12356
 
12327
	for_each_intel_crtc(dev, crtc) {
12357
	for_each_intel_crtc(dev, crtc) {
12328
		if (!crtc->active)
12358
		if (!crtc->active)
12329
			continue;
12359
			continue;
12330
 
12360
 
12331
		/*
12361
		/*
12332
		 * Note that reserving the BIOS fb up front prevents us
12362
		 * Note that reserving the BIOS fb up front prevents us
12333
		 * from stuffing other stolen allocations like the ring
12363
		 * from stuffing other stolen allocations like the ring
12334
		 * on top.  This prevents some ugliness at boot time, and
12364
		 * on top.  This prevents some ugliness at boot time, and
12335
		 * can even allow for smooth boot transitions if the BIOS
12365
		 * can even allow for smooth boot transitions if the BIOS
12336
		 * fb is large enough for the active pipe configuration.
12366
		 * fb is large enough for the active pipe configuration.
12337
		 */
12367
		 */
12338
		if (dev_priv->display.get_plane_config) {
12368
		if (dev_priv->display.get_plane_config) {
12339
			dev_priv->display.get_plane_config(crtc,
12369
			dev_priv->display.get_plane_config(crtc,
12340
							   &crtc->plane_config);
12370
							   &crtc->plane_config);
12341
			/*
12371
			/*
12342
			 * If the fb is shared between multiple heads, we'll
12372
			 * If the fb is shared between multiple heads, we'll
12343
			 * just get the first one.
12373
			 * just get the first one.
12344
			 */
12374
			 */
12345
			intel_find_plane_obj(crtc, &crtc->plane_config);
12375
			intel_find_plane_obj(crtc, &crtc->plane_config);
12346
		}
12376
		}
12347
	}
12377
	}
12348
}
12378
}
12349
 
12379
 
12350
static void intel_enable_pipe_a(struct drm_device *dev)
12380
static void intel_enable_pipe_a(struct drm_device *dev)
12351
{
12381
{
12352
	struct intel_connector *connector;
12382
	struct intel_connector *connector;
12353
	struct drm_connector *crt = NULL;
12383
	struct drm_connector *crt = NULL;
12354
	struct intel_load_detect_pipe load_detect_temp;
12384
	struct intel_load_detect_pipe load_detect_temp;
12355
	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
12385
	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
12356
 
12386
 
12357
	/* We can't just switch on pipe A, we need to set things up with a
12387
	/* We can't just switch on pipe A, we need to set things up with a
12358
	 * proper mode and output configuration. As a gross hack, enable pipe A
12388
	 * proper mode and output configuration. As a gross hack, enable pipe A
12359
	 * by enabling the load detect pipe once. */
12389
	 * by enabling the load detect pipe once. */
12360
	list_for_each_entry(connector,
12390
	list_for_each_entry(connector,
12361
			    &dev->mode_config.connector_list,
12391
			    &dev->mode_config.connector_list,
12362
			    base.head) {
12392
			    base.head) {
12363
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
12393
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
12364
			crt = &connector->base;
12394
			crt = &connector->base;
12365
			break;
12395
			break;
12366
		}
12396
		}
12367
	}
12397
	}
12368
 
12398
 
12369
	if (!crt)
12399
	if (!crt)
12370
		return;
12400
		return;
12371
 
12401
 
12372
	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
12402
	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
12373
		intel_release_load_detect_pipe(crt, &load_detect_temp);
12403
		intel_release_load_detect_pipe(crt, &load_detect_temp);
12374
}
12404
}
12375
 
12405
 
12376
static bool
12406
static bool
12377
intel_check_plane_mapping(struct intel_crtc *crtc)
12407
intel_check_plane_mapping(struct intel_crtc *crtc)
12378
{
12408
{
12379
	struct drm_device *dev = crtc->base.dev;
12409
	struct drm_device *dev = crtc->base.dev;
12380
	struct drm_i915_private *dev_priv = dev->dev_private;
12410
	struct drm_i915_private *dev_priv = dev->dev_private;
12381
	u32 reg, val;
12411
	u32 reg, val;
12382
 
12412
 
12383
	if (INTEL_INFO(dev)->num_pipes == 1)
12413
	if (INTEL_INFO(dev)->num_pipes == 1)
12384
		return true;
12414
		return true;
12385
 
12415
 
12386
	reg = DSPCNTR(!crtc->plane);
12416
	reg = DSPCNTR(!crtc->plane);
12387
	val = I915_READ(reg);
12417
	val = I915_READ(reg);
12388
 
12418
 
12389
	if ((val & DISPLAY_PLANE_ENABLE) &&
12419
	if ((val & DISPLAY_PLANE_ENABLE) &&
12390
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
12420
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
12391
		return false;
12421
		return false;
12392
 
12422
 
12393
	return true;
12423
	return true;
12394
}
12424
}
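/*
 * On gen2/3 a display plane can be routed to either pipe, so the check above
 * reads the other plane's DSPCNTR and flags a bad mapping when that plane is
 * enabled and selects this crtc's pipe.  Gen4+ has a fixed plane -> pipe
 * mapping, and single-pipe parts trivially pass.
 */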
12395
 
12425
 
12396
static void intel_sanitize_crtc(struct intel_crtc *crtc)
12426
static void intel_sanitize_crtc(struct intel_crtc *crtc)
12397
{
12427
{
12398
	struct drm_device *dev = crtc->base.dev;
12428
	struct drm_device *dev = crtc->base.dev;
12399
	struct drm_i915_private *dev_priv = dev->dev_private;
12429
	struct drm_i915_private *dev_priv = dev->dev_private;
12400
	u32 reg;
12430
	u32 reg;
12401
 
12431
 
12402
	/* Clear any frame start delays used for debugging left by the BIOS */
12432
	/* Clear any frame start delays used for debugging left by the BIOS */
12403
	reg = PIPECONF(crtc->config.cpu_transcoder);
12433
	reg = PIPECONF(crtc->config.cpu_transcoder);
12404
	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
12434
	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
12405
 
12435
 
12406
	/* restore vblank interrupts to correct state */
12436
	/* restore vblank interrupts to correct state */
12407
	if (crtc->active)
12437
	if (crtc->active)
12408
		drm_vblank_on(dev, crtc->pipe);
12438
		drm_vblank_on(dev, crtc->pipe);
12409
	else
12439
	else
12410
		drm_vblank_off(dev, crtc->pipe);
12440
		drm_vblank_off(dev, crtc->pipe);
12411
 
12441
 
12412
	/* We need to sanitize the plane -> pipe mapping first because this will
12442
	/* We need to sanitize the plane -> pipe mapping first because this will
12413
	 * disable the crtc (and hence change the state) if it is wrong. Note
12443
	 * disable the crtc (and hence change the state) if it is wrong. Note
12414
	 * that gen4+ has a fixed plane -> pipe mapping.  */
12444
	 * that gen4+ has a fixed plane -> pipe mapping.  */
12415
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
12445
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
12416
		struct intel_connector *connector;
12446
		struct intel_connector *connector;
12417
		bool plane;
12447
		bool plane;
12418
 
12448
 
12419
		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
12449
		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
12420
			      crtc->base.base.id);
12450
			      crtc->base.base.id);
12421
 
12451
 
12422
		/* Pipe has the wrong plane attached and the plane is active.
12452
		/* Pipe has the wrong plane attached and the plane is active.
12423
		 * Temporarily change the plane mapping and disable everything
12453
		 * Temporarily change the plane mapping and disable everything
12424
		 * ...  */
12454
		 * ...  */
12425
		plane = crtc->plane;
12455
		plane = crtc->plane;
12426
		crtc->plane = !plane;
12456
		crtc->plane = !plane;
12427
		crtc->primary_enabled = true;
12457
		crtc->primary_enabled = true;
12428
		dev_priv->display.crtc_disable(&crtc->base);
12458
		dev_priv->display.crtc_disable(&crtc->base);
12429
		crtc->plane = plane;
12459
		crtc->plane = plane;
12430
 
12460
 
12431
		/* ... and break all links. */
12461
		/* ... and break all links. */
12432
		list_for_each_entry(connector, &dev->mode_config.connector_list,
12462
		list_for_each_entry(connector, &dev->mode_config.connector_list,
12433
				    base.head) {
12463
				    base.head) {
12434
			if (connector->encoder->base.crtc != &crtc->base)
12464
			if (connector->encoder->base.crtc != &crtc->base)
12435
				continue;
12465
				continue;
12436
 
12466
 
12437
			connector->base.dpms = DRM_MODE_DPMS_OFF;
12467
			connector->base.dpms = DRM_MODE_DPMS_OFF;
12438
			connector->base.encoder = NULL;
12468
			connector->base.encoder = NULL;
12439
		}
12469
		}
12440
		/* multiple connectors may have the same encoder:
12470
		/* multiple connectors may have the same encoder:
12441
		 *  handle them and break crtc link separately */
12471
		 *  handle them and break crtc link separately */
12442
		list_for_each_entry(connector, &dev->mode_config.connector_list,
12472
		list_for_each_entry(connector, &dev->mode_config.connector_list,
12443
				    base.head)
12473
				    base.head)
12444
			if (connector->encoder->base.crtc == &crtc->base) {
12474
			if (connector->encoder->base.crtc == &crtc->base) {
12445
				connector->encoder->base.crtc = NULL;
12475
				connector->encoder->base.crtc = NULL;
12446
				connector->encoder->connectors_active = false;
12476
				connector->encoder->connectors_active = false;
12447
			}
12477
			}
12448
 
12478
 
12449
		WARN_ON(crtc->active);
12479
		WARN_ON(crtc->active);
12450
		crtc->base.enabled = false;
12480
		crtc->base.enabled = false;
12451
	}
12481
	}
12452
 
12482
 
12453
	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
12483
	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
12454
	    crtc->pipe == PIPE_A && !crtc->active) {
12484
	    crtc->pipe == PIPE_A && !crtc->active) {
12455
		/* BIOS forgot to enable pipe A, this mostly happens after
12485
		/* BIOS forgot to enable pipe A, this mostly happens after
12456
		 * resume. Force-enable the pipe to fix this, the update_dpms
12486
		 * resume. Force-enable the pipe to fix this, the update_dpms
12457
		 * call below will restore the pipe to the right state, but leave
12487
		 * call below will restore the pipe to the right state, but leave
12458
		 * the required bits on. */
12488
		 * the required bits on. */
12459
		intel_enable_pipe_a(dev);
12489
		intel_enable_pipe_a(dev);
12460
	}
12490
	}
12461
 
12491
 
12462
	/* Adjust the state of the output pipe according to whether we
12492
	/* Adjust the state of the output pipe according to whether we
12463
	 * have active connectors/encoders. */
12493
	 * have active connectors/encoders. */
12464
	intel_crtc_update_dpms(&crtc->base);
12494
	intel_crtc_update_dpms(&crtc->base);
12465
 
12495
 
12466
	if (crtc->active != crtc->base.enabled) {
12496
	if (crtc->active != crtc->base.enabled) {
12467
		struct intel_encoder *encoder;
12497
		struct intel_encoder *encoder;
12468
 
12498
 
12469
		/* This can happen either due to bugs in the get_hw_state
12499
		/* This can happen either due to bugs in the get_hw_state
12470
		 * functions or because the pipe is force-enabled due to the
12500
		 * functions or because the pipe is force-enabled due to the
12471
		 * pipe A quirk. */
12501
		 * pipe A quirk. */
12472
		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
12502
		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
12473
			      crtc->base.base.id,
12503
			      crtc->base.base.id,
12474
			      crtc->base.enabled ? "enabled" : "disabled",
12504
			      crtc->base.enabled ? "enabled" : "disabled",
12475
			      crtc->active ? "enabled" : "disabled");
12505
			      crtc->active ? "enabled" : "disabled");
12476
 
12506
 
12477
		crtc->base.enabled = crtc->active;
12507
		crtc->base.enabled = crtc->active;
12478
 
12508
 
12479
		/* Because we only establish the connector -> encoder ->
12509
		/* Because we only establish the connector -> encoder ->
12480
		 * crtc links if something is active, this means the
12510
		 * crtc links if something is active, this means the
12481
		 * crtc is now deactivated. Break the links. connector
12511
		 * crtc is now deactivated. Break the links. connector
12482
		 * -> encoder links are only established when things are
12512
		 * -> encoder links are only established when things are
12483
		 *  actually up, hence no need to break them. */
12513
		 *  actually up, hence no need to break them. */
12484
		WARN_ON(crtc->active);
12514
		WARN_ON(crtc->active);
12485
 
12515
 
12486
		for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
12516
		for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
12487
			WARN_ON(encoder->connectors_active);
12517
			WARN_ON(encoder->connectors_active);
12488
			encoder->base.crtc = NULL;
12518
			encoder->base.crtc = NULL;
12489
		}
12519
		}
12490
	}
12520
	}
12491
 
12521
 
12492
	if (crtc->active || IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen < 5) {
12522
	if (crtc->active || IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen < 5) {
12493
		/*
12523
		/*
12494
		 * We start out with underrun reporting disabled to avoid races.
12524
		 * We start out with underrun reporting disabled to avoid races.
12495
		 * For correct bookkeeping mark this on active crtcs.
12525
		 * For correct bookkeeping mark this on active crtcs.
12496
		 *
12526
		 *
12497
		 * Also on gmch platforms we don't have any hardware bits to
12527
		 * Also on gmch platforms we don't have any hardware bits to
12498
		 * disable the underrun reporting. Which means we need to start
12528
		 * disable the underrun reporting. Which means we need to start
12499
		 * out with underrun reporting disabled also on inactive pipes,
12529
		 * out with underrun reporting disabled also on inactive pipes,
12500
		 * since otherwise we'll complain about the garbage we read when
12530
		 * since otherwise we'll complain about the garbage we read when
12501
		 * e.g. coming up after runtime pm.
12531
		 * e.g. coming up after runtime pm.
12502
		 *
12532
		 *
12503
		 * No protection against concurrent access is required - at
12533
		 * No protection against concurrent access is required - at
12504
		 * worst a fifo underrun happens which also sets this to false.
12534
		 * worst a fifo underrun happens which also sets this to false.
12505
		 */
12535
		 */
12506
		crtc->cpu_fifo_underrun_disabled = true;
12536
		crtc->cpu_fifo_underrun_disabled = true;
12507
		crtc->pch_fifo_underrun_disabled = true;
12537
		crtc->pch_fifo_underrun_disabled = true;
12508
 
12538
 
12509
		update_scanline_offset(crtc);
12539
		update_scanline_offset(crtc);
12510
	}
12540
	}
12511
}
12541
}
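/*
 * Sanitization above, in order: clear BIOS frame start delays, resync vblank
 * bookkeeping, repair a wrong plane -> pipe mapping on pre-gen4 hardware,
 * apply the pipe A force quirk if needed, let intel_crtc_update_dpms()
 * settle the pipe state and break stale encoder links, and finally reset the
 * fifo underrun bookkeeping and scanline offset where required.
 */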
12512
 
12542
 
12513
static void intel_sanitize_encoder(struct intel_encoder *encoder)
12543
static void intel_sanitize_encoder(struct intel_encoder *encoder)
12514
{
12544
{
12515
	struct intel_connector *connector;
12545
	struct intel_connector *connector;
12516
	struct drm_device *dev = encoder->base.dev;
12546
	struct drm_device *dev = encoder->base.dev;
12517
 
12547
 
12518
	/* We need to check both for a crtc link (meaning that the
12548
	/* We need to check both for a crtc link (meaning that the
12519
	 * encoder is active and trying to read from a pipe) and the
12549
	 * encoder is active and trying to read from a pipe) and the
12520
	 * pipe itself being active. */
12550
	 * pipe itself being active. */
12521
	bool has_active_crtc = encoder->base.crtc &&
12551
	bool has_active_crtc = encoder->base.crtc &&
12522
		to_intel_crtc(encoder->base.crtc)->active;
12552
		to_intel_crtc(encoder->base.crtc)->active;
12523
 
12553
 
12524
	if (encoder->connectors_active && !has_active_crtc) {
12554
	if (encoder->connectors_active && !has_active_crtc) {
12525
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
12555
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
12526
			      encoder->base.base.id,
12556
			      encoder->base.base.id,
12527
			      encoder->base.name);
12557
			      encoder->base.name);
12528
 
12558
 
12529
		/* Connector is active, but has no active pipe. This is
12559
		/* Connector is active, but has no active pipe. This is
12530
		 * fallout from our resume register restoring. Disable
12560
		 * fallout from our resume register restoring. Disable
12531
		 * the encoder manually again. */
12561
		 * the encoder manually again. */
12532
		if (encoder->base.crtc) {
12562
		if (encoder->base.crtc) {
12533
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
12563
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
12534
				      encoder->base.base.id,
12564
				      encoder->base.base.id,
12535
				      encoder->base.name);
12565
				      encoder->base.name);
12536
			encoder->disable(encoder);
12566
			encoder->disable(encoder);
12537
			if (encoder->post_disable)
12567
			if (encoder->post_disable)
12538
				encoder->post_disable(encoder);
12568
				encoder->post_disable(encoder);
12539
		}
12569
		}
12540
		encoder->base.crtc = NULL;
12570
		encoder->base.crtc = NULL;
12541
		encoder->connectors_active = false;
12571
		encoder->connectors_active = false;
12542
 
12572
 
12543
		/* Inconsistent output/port/pipe state happens presumably due to
12573
		/* Inconsistent output/port/pipe state happens presumably due to
12544
		 * a bug in one of the get_hw_state functions. Or someplace else
12574
		 * a bug in one of the get_hw_state functions. Or someplace else
12545
		 * in our code, like the register restore mess on resume. Clamp
12575
		 * in our code, like the register restore mess on resume. Clamp
12546
		 * things to off as a safer default. */
12576
		 * things to off as a safer default. */
12547
		list_for_each_entry(connector,
12577
		list_for_each_entry(connector,
12548
				    &dev->mode_config.connector_list,
12578
				    &dev->mode_config.connector_list,
12549
				    base.head) {
12579
				    base.head) {
12550
			if (connector->encoder != encoder)
12580
			if (connector->encoder != encoder)
12551
				continue;
12581
				continue;
12552
			connector->base.dpms = DRM_MODE_DPMS_OFF;
12582
			connector->base.dpms = DRM_MODE_DPMS_OFF;
12553
			connector->base.encoder = NULL;
12583
			connector->base.encoder = NULL;
12554
		}
12584
		}
12555
	}
12585
	}
12556
	/* Enabled encoders without active connectors will be fixed in
12586
	/* Enabled encoders without active connectors will be fixed in
12557
	 * the crtc fixup. */
12587
	 * the crtc fixup. */
12558
}
12588
}
12559
 
12589
 
12560
void i915_redisable_vga_power_on(struct drm_device *dev)
12590
void i915_redisable_vga_power_on(struct drm_device *dev)
12561
{
12591
{
12562
	struct drm_i915_private *dev_priv = dev->dev_private;
12592
	struct drm_i915_private *dev_priv = dev->dev_private;
12563
	u32 vga_reg = i915_vgacntrl_reg(dev);
12593
	u32 vga_reg = i915_vgacntrl_reg(dev);
12564
 
12594
 
12565
	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
12595
	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
12566
		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
12596
		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
12567
		i915_disable_vga(dev);
12597
		i915_disable_vga(dev);
12568
	}
12598
	}
12569
}
12599
}
12570
 
12600
 
12571
void i915_redisable_vga(struct drm_device *dev)
12601
void i915_redisable_vga(struct drm_device *dev)
12572
{
12602
{
12573
	struct drm_i915_private *dev_priv = dev->dev_private;
12603
	struct drm_i915_private *dev_priv = dev->dev_private;
12574
 
12604
 
12575
	/* This function can be called both from intel_modeset_setup_hw_state or
12605
	/* This function can be called both from intel_modeset_setup_hw_state or
12576
	 * at a very early point in our resume sequence, where the power well
12606
	 * at a very early point in our resume sequence, where the power well
12577
	 * structures are not yet restored. Since this function is at a very
12607
	 * structures are not yet restored. Since this function is at a very
12578
	 * paranoid "someone might have enabled VGA while we were not looking"
12608
	 * paranoid "someone might have enabled VGA while we were not looking"
12579
	 * level, just check if the power well is enabled instead of trying to
12609
	 * level, just check if the power well is enabled instead of trying to
12580
	 * follow the "don't touch the power well if we don't need it" policy
12610
	 * follow the "don't touch the power well if we don't need it" policy
12581
	 * the rest of the driver uses. */
12611
	 * the rest of the driver uses. */
12582
	if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA))
12612
	if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA))
12583
		return;
12613
		return;
12584
 
12614
 
12585
	i915_redisable_vga_power_on(dev);
12615
	i915_redisable_vga_power_on(dev);
12586
}
12616
}
12587
 
12617
 
12588
static bool primary_get_hw_state(struct intel_crtc *crtc)
12618
static bool primary_get_hw_state(struct intel_crtc *crtc)
12589
{
12619
{
12590
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
12620
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
12591
 
12621
 
12592
	if (!crtc->active)
12622
	if (!crtc->active)
12593
		return false;
12623
		return false;
12594
 
12624
 
12595
	return I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE;
12625
	return I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE;
12596
}
12626
}
12597
 
12627
 
12598
static void intel_modeset_readout_hw_state(struct drm_device *dev)
12628
static void intel_modeset_readout_hw_state(struct drm_device *dev)
12599
{
12629
{
12600
	struct drm_i915_private *dev_priv = dev->dev_private;
12630
	struct drm_i915_private *dev_priv = dev->dev_private;
12601
	enum pipe pipe;
12631
	enum pipe pipe;
12602
	struct intel_crtc *crtc;
12632
	struct intel_crtc *crtc;
12603
	struct intel_encoder *encoder;
12633
	struct intel_encoder *encoder;
12604
	struct intel_connector *connector;
12634
	struct intel_connector *connector;
12605
	int i;
12635
	int i;
12606
 
12636
 
12607
	for_each_intel_crtc(dev, crtc) {
12637
	for_each_intel_crtc(dev, crtc) {
12608
		memset(&crtc->config, 0, sizeof(crtc->config));
12638
		memset(&crtc->config, 0, sizeof(crtc->config));
12609
 
12639
 
12610
		crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
12640
		crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
12611
 
12641
 
12612
		crtc->active = dev_priv->display.get_pipe_config(crtc,
12642
		crtc->active = dev_priv->display.get_pipe_config(crtc,
12613
								 &crtc->config);
12643
								 &crtc->config);
12614
 
12644
 
12615
		crtc->base.enabled = crtc->active;
12645
		crtc->base.enabled = crtc->active;
12616
		crtc->primary_enabled = primary_get_hw_state(crtc);
12646
		crtc->primary_enabled = primary_get_hw_state(crtc);
12617
 
12647
 
12618
		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
12648
		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
12619
			      crtc->base.base.id,
12649
			      crtc->base.base.id,
12620
			      crtc->active ? "enabled" : "disabled");
12650
			      crtc->active ? "enabled" : "disabled");
12621
	}
12651
	}
12622
 
12652
 
12623
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
12653
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
12624
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
12654
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
12625
 
12655
 
12626
		pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state);
12656
		pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state);
12627
		pll->active = 0;
12657
		pll->active = 0;
12628
		for_each_intel_crtc(dev, crtc) {
12658
		for_each_intel_crtc(dev, crtc) {
12629
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
12659
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
12630
				pll->active++;
12660
				pll->active++;
12631
		}
12661
		}
12632
		pll->refcount = pll->active;
12662
		pll->refcount = pll->active;
12633
 
12663
 
12634
		DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
12664
		DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
12635
			      pll->name, pll->refcount, pll->on);
12665
			      pll->name, pll->refcount, pll->on);
12636
 
12666
 
12637
		if (pll->refcount)
12667
		if (pll->refcount)
12638
			intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
12668
			intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
12639
	}
12669
	}
12640
 
12670
 
12641
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
12671
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
12642
			    base.head) {
12672
			    base.head) {
12643
		pipe = 0;
12673
		pipe = 0;
12644
 
12674
 
12645
		if (encoder->get_hw_state(encoder, &pipe)) {
12675
		if (encoder->get_hw_state(encoder, &pipe)) {
12646
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
12676
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
12647
			encoder->base.crtc = &crtc->base;
12677
			encoder->base.crtc = &crtc->base;
12648
			encoder->get_config(encoder, &crtc->config);
12678
			encoder->get_config(encoder, &crtc->config);
12649
		} else {
12679
		} else {
12650
			encoder->base.crtc = NULL;
12680
			encoder->base.crtc = NULL;
12651
		}
12681
		}
12652
 
12682
 
12653
		encoder->connectors_active = false;
12683
		encoder->connectors_active = false;
12654
		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
12684
		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
12655
			      encoder->base.base.id,
12685
			      encoder->base.base.id,
12656
			      encoder->base.name,
12686
			      encoder->base.name,
12657
			      encoder->base.crtc ? "enabled" : "disabled",
12687
			      encoder->base.crtc ? "enabled" : "disabled",
12658
			      pipe_name(pipe));
12688
			      pipe_name(pipe));
12659
	}
12689
	}
12660
 
12690
 
12661
	list_for_each_entry(connector, &dev->mode_config.connector_list,
12691
	list_for_each_entry(connector, &dev->mode_config.connector_list,
12662
			    base.head) {
12692
			    base.head) {
12663
		if (connector->get_hw_state(connector)) {
12693
		if (connector->get_hw_state(connector)) {
12664
			connector->base.dpms = DRM_MODE_DPMS_ON;
12694
			connector->base.dpms = DRM_MODE_DPMS_ON;
12665
			connector->encoder->connectors_active = true;
12695
			connector->encoder->connectors_active = true;
12666
			connector->base.encoder = &connector->encoder->base;
12696
			connector->base.encoder = &connector->encoder->base;
12667
		} else {
12697
		} else {
12668
			connector->base.dpms = DRM_MODE_DPMS_OFF;
12698
			connector->base.dpms = DRM_MODE_DPMS_OFF;
12669
			connector->base.encoder = NULL;
12699
			connector->base.encoder = NULL;
12670
		}
12700
		}
12671
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
12701
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
12672
			      connector->base.base.id,
12702
			      connector->base.base.id,
12673
			      connector->base.name,
12703
			      connector->base.name,
12674
			      connector->base.encoder ? "enabled" : "disabled");
12704
			      connector->base.encoder ? "enabled" : "disabled");
12675
	}
12705
	}
12676
}
12706
}
12677
 
12707
 
/* Scan out the current hw modeset state, sanitize it and map it into the drm
 * and i915 state tracking structures. */
void intel_modeset_setup_hw_state(struct drm_device *dev,
				  bool force_restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	int i;

	intel_modeset_readout_hw_state(dev);

	/*
	 * Now that we have the config, copy it to each CRTC struct.
	 * Note that this could go away if we move to using crtc_config
	 * checking everywhere.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (crtc->active && i915.fastboot) {
			intel_mode_from_pipe_config(&crtc->base.mode, &crtc->config);
			DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
				      crtc->base.base.id);
			drm_mode_debug_printmodeline(&crtc->base.mode);
		}
	}

	/* HW state is read out, now we need to sanitize this mess. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		intel_sanitize_encoder(encoder);
	}

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		intel_sanitize_crtc(crtc);
		intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
	}

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);

		pll->disable(dev_priv, pll);
		pll->on = false;
	}

	if (HAS_PCH_SPLIT(dev))
		ilk_wm_get_hw_state(dev);

	if (force_restore) {
		i915_redisable_vga(dev);

		/*
		 * We need to use raw interfaces for restoring state to avoid
		 * checking (bogus) intermediate states.
		 */
		for_each_pipe(pipe) {
			struct drm_crtc *crtc =
				dev_priv->pipe_to_crtc_mapping[pipe];

			__intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
					 crtc->primary->fb);
		}
	} else {
		intel_modeset_update_staged_output_state(dev);
	}

	intel_modeset_check_state(dev);
}

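/*
 * GEM-side modeset init: bring up GT power saving and the display hardware,
 * then pin and fence the framebuffers allocated at boot; any fb that fails
 * to pin is dropped from its CRTC.
 */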
void intel_modeset_gem_init(struct drm_device *dev)
{
	struct drm_crtc *c;
	struct drm_i915_gem_object *obj;

	mutex_lock(&dev->struct_mutex);
	intel_init_gt_powersave(dev);
	mutex_unlock(&dev->struct_mutex);

	intel_modeset_init_hw(dev);

//   intel_setup_overlay(dev);

	/*
	 * Make sure any fbs we allocated at startup are properly
	 * pinned & fenced.  When we do the allocation it's too early
	 * for this.
	 */
	mutex_lock(&dev->struct_mutex);
	for_each_crtc(dev, c) {
		obj = intel_fb_obj(c->primary->fb);
		if (obj == NULL)
			continue;

		if (intel_pin_and_fence_fb_obj(dev, obj, NULL)) {
			DRM_ERROR("failed to pin boot fb on pipe %d\n",
				  to_intel_crtc(c)->pipe);
			drm_framebuffer_unreference(c->primary->fb);
			c->primary->fb = NULL;
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

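/*
 * Tear down the per-connector extras (panel backlight, sysfs entry) before
 * the connector itself is destroyed.
 */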
void intel_connector_unregister(struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;

	intel_panel_destroy_backlight(connector);
	drm_connector_unregister(connector);
}

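/*
 * Driver teardown. The body is compiled out (#if 0) in this port; upstream it
 * disables interrupts, hotplug polling and power saving, then unregisters the
 * connectors and frees the modeset state.
 */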
void intel_modeset_cleanup(struct drm_device *dev)
{
#if 0
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_connector *connector;

	/*
	 * Disable interrupts and polling as the first thing to avoid creating
	 * havoc. Too much stuff here (turning off rps, connectors, ...) would
	 * experience fancy races otherwise.
	 */
	drm_irq_uninstall(dev);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm._irqs_disabled = true;

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	drm_kms_helper_poll_fini(dev);

	mutex_lock(&dev->struct_mutex);

	intel_unregister_dsm_handler();

	intel_disable_fbc(dev);

	intel_disable_gt_powersave(dev);

	ironlake_teardown_rc6(dev);

	mutex_unlock(&dev->struct_mutex);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	/* destroy the backlight and sysfs files before encoders/connectors */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct intel_connector *intel_connector;

		intel_connector = to_intel_connector(connector);
		intel_connector->unregister(intel_connector);
	}

	drm_mode_config_cleanup(dev);

	intel_cleanup_overlay(dev);

	mutex_lock(&dev->struct_mutex);
	intel_cleanup_gt_powersave(dev);
	mutex_unlock(&dev->struct_mutex);
#endif
}

/*
 * Return which encoder is currently attached to the connector.
 */
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
{
	return &intel_attached_encoder(connector)->base;
}

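/*
 * Bind an intel_connector to its encoder and register the link with the DRM
 * core via drm_mode_connector_attach_encoder().
 */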
void intel_connector_attach_encoder(struct intel_connector *connector,
				    struct intel_encoder *encoder)
{
	connector->encoder = encoder;
	drm_mode_connector_attach_encoder(&connector->base,
					  &encoder->base);
}

/*
 * set vga decode state - true == enable VGA decode
 */
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
	u16 gmch_ctrl;

	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
		DRM_ERROR("failed to read control word\n");
		return -EIO;
	}

	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
		return 0;

	if (state)
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
	else
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;

	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
		DRM_ERROR("failed to write control word\n");
		return -EIO;
	}

	return 0;
}

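/*
 * Display error-state capture, built only with CONFIG_DEBUG_FS: a register
 * snapshot per pipe/plane/cursor/transcoder plus a printer that dumps the
 * snapshot into an i915 error-state buffer.
 */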
#ifdef CONFIG_DEBUG_FS

struct intel_display_error_state {

	u32 power_well_driver;

	int num_transcoders;

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];
};

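/*
 * Capture the current display registers into a freshly allocated snapshot.
 * Pipes and transcoders whose power domain is off are skipped so no register
 * in a powered-down well is touched.
 */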
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_EDP,
	};
	int i;

	if (INTEL_INFO(dev)->num_pipes == 0)
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);

	for_each_pipe(i) {
		error->pipe[i].power_domain_on =
			intel_display_power_enabled_unlocked(dev_priv,
						       POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		if (INTEL_INFO(dev)->gen <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_INFO(dev)->gen >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));

		if (HAS_GMCH_DISPLAY(dev))
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
	}

	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
	if (HAS_DDI(dev_priv->dev))
		error->num_transcoders++; /* Account for eDP. */

	for (i = 0; i < error->num_transcoders; i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		error->transcoder[i].power_domain_on =
			intel_display_power_enabled_unlocked(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

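/*
 * Pretty-print a snapshot captured above into an error-state buffer, grouped
 * the same way: pipe, plane and cursor registers per pipe, then the
 * transcoder timings.
 */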
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct drm_device *dev,
				struct intel_display_error_state *error)
{
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   error->pipe[i].power_domain_on ? "on" : "off");
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_INFO(dev)->gen <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_INFO(dev)->gen >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < error->num_transcoders; i++) {
		err_printf(m, "CPU transcoder: %c\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   error->transcoder[i].power_domain_on ? "on" : "off");
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
#endif